# Reference: https://tensorflow-object-detection-api-tutorial.readthedocs.io/en/latest/
# Use the following package versions for compatibility
# (the tf-models-official dependency installed below requires pyyaml<5.4.0,>=5.1,
# so PyYAML is pinned down to 5.3 here before installing the Object Detection API).
!pip install "PyYAML==5.3" "numpy==1.24.3"
Collecting PyYAML==5.3
Downloading PyYAML-5.3.tar.gz (268 kB)
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 268.2/268.2 kB 1.9 MB/s eta 0:00:00
Preparing metadata (setup.py) ... done
Collecting numpy==1.24.3
Downloading numpy-1.24.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (17.3 MB)
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 17.3/17.3 MB 21.6 MB/s eta 0:00:00
Building wheels for collected packages: PyYAML
Building wheel for PyYAML (setup.py) ... done
Created wheel for PyYAML: filename=PyYAML-5.3-cp310-cp310-linux_x86_64.whl size=44244 sha256=4b44aafb375bdd28f6c76220ada95f41936cbb198c453a1f4d24859532ac7318
Stored in directory: /root/.cache/pip/wheels/0d/72/68/a263cfc14175636cf26bada99f13b735be1b60a11318e08bfc
Successfully built PyYAML
Installing collected packages: PyYAML, numpy
Attempting uninstall: PyYAML
Found existing installation: PyYAML 6.0.1
Uninstalling PyYAML-6.0.1:
Successfully uninstalled PyYAML-6.0.1
Attempting uninstall: numpy
Found existing installation: numpy 1.22.4
Uninstalling numpy-1.22.4:
Successfully uninstalled numpy-1.22.4
ERROR: pip's dependency resolver does not currently take into account all the packages that are installed. This behaviour is the source of the following dependency conflicts.
dask 2022.12.1 requires pyyaml>=5.3.1, but you have pyyaml 5.3 which is incompatible.
flax 0.7.0 requires PyYAML>=5.4.1, but you have pyyaml 5.3 which is incompatible.
numba 0.56.4 requires numpy<1.24,>=1.18, but you have numpy 1.24.3 which is incompatible.
tensorflow 2.12.0 requires numpy<1.24,>=1.22, but you have numpy 1.24.3 which is incompatible.
Successfully installed PyYAML-5.3 numpy-1.24.3
import os
import tarfile
from zipfile import ZipFile

# Create a root working directory for the Model Garden clone and downloads.
# os.makedirs with exist_ok=True is idempotent and avoids the check-then-create
# (TOCTOU) race of os.path.exists() followed by os.mkdir().
os.makedirs("Tensorflow", exist_ok=True)
# Define model name
# Pretrained checkpoint identifier (SSD MobileNet V2 FPNLite, 640x640 input,
# COCO 2017) — presumably matched against the TF2 Detection Model Zoo; the
# download that uses it is not visible in this chunk.
MODEL_NAME = "ssd_mobilenet_v2_fpnlite_640x640_coco17_tpu-8"
# Clone model garden
# %cd is an IPython line magic: it changes the notebook process's working
# directory (plain `os.chdir` equivalent), so the clone lands inside Tensorflow/.
%cd "Tensorflow"
# Shell out (IPython `!`) to clone the TensorFlow Model Garden, which contains
# the object_detection package installed in the next cell.
!git clone "https://github.com/tensorflow/models.git"
/content/Tensorflow Cloning into 'models'... remote: Enumerating objects: 86752, done. remote: Counting objects: 100% (506/506), done. remote: Compressing objects: 100% (244/244), done. remote: Total 86752 (delta 265), reused 484 (delta 256), pack-reused 86246 Receiving objects: 100% (86752/86752), 598.90 MiB | 23.98 MiB/s, done. Resolving deltas: 100% (62139/62139), done.
# Install object detection package
# Move into the research directory of the freshly cloned Model Garden.
%cd "models/research"
# Compile the protobuf message definitions into Python modules; the
# object_detection package imports these generated *_pb2.py files.
!protoc object_detection/protos/*.proto --python_out=.
# Copy the TF2-specific setup.py up into research/ so `pip install .`
# installs the TF2 variant of the Object Detection API.
%cp object_detection/packages/tf2/setup.py .
!pip install .
/content/Tensorflow/models/research
Processing /content/Tensorflow/models/research
Preparing metadata (setup.py) ... done
Collecting avro-python3 (from object-detection==0.1)
Downloading avro-python3-1.10.2.tar.gz (38 kB)
Preparing metadata (setup.py) ... done
Collecting apache-beam (from object-detection==0.1)
Downloading apache_beam-2.49.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (14.6 MB)
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 14.6/14.6 MB 41.9 MB/s eta 0:00:00
Requirement already satisfied: pillow in /usr/local/lib/python3.10/dist-packages (from object-detection==0.1) (8.4.0)
Requirement already satisfied: lxml in /usr/local/lib/python3.10/dist-packages (from object-detection==0.1) (4.9.3)
Requirement already satisfied: matplotlib in /usr/local/lib/python3.10/dist-packages (from object-detection==0.1) (3.7.1)
Requirement already satisfied: Cython in /usr/local/lib/python3.10/dist-packages (from object-detection==0.1) (0.29.36)
Requirement already satisfied: contextlib2 in /usr/local/lib/python3.10/dist-packages (from object-detection==0.1) (21.6.0)
Requirement already satisfied: tf-slim in /usr/local/lib/python3.10/dist-packages (from object-detection==0.1) (1.1.0)
Requirement already satisfied: six in /usr/local/lib/python3.10/dist-packages (from object-detection==0.1) (1.16.0)
Requirement already satisfied: pycocotools in /usr/local/lib/python3.10/dist-packages (from object-detection==0.1) (2.0.6)
Collecting lvis (from object-detection==0.1)
Downloading lvis-0.5.3-py3-none-any.whl (14 kB)
Requirement already satisfied: scipy in /usr/local/lib/python3.10/dist-packages (from object-detection==0.1) (1.10.1)
Requirement already satisfied: pandas in /usr/local/lib/python3.10/dist-packages (from object-detection==0.1) (1.5.3)
Collecting tf-models-official>=2.5.1 (from object-detection==0.1)
Downloading tf_models_official-2.13.1-py2.py3-none-any.whl (2.6 MB)
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 2.6/2.6 MB 19.8 MB/s eta 0:00:00
Collecting tensorflow_io (from object-detection==0.1)
Downloading tensorflow_io-0.32.0-cp310-cp310-manylinux_2_12_x86_64.manylinux2010_x86_64.whl (28.0 MB)
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 28.0/28.0 MB 10.3 MB/s eta 0:00:00
Requirement already satisfied: keras in /usr/local/lib/python3.10/dist-packages (from object-detection==0.1) (2.12.0)
Collecting pyparsing==2.4.7 (from object-detection==0.1)
Downloading pyparsing-2.4.7-py2.py3-none-any.whl (67 kB)
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 67.8/67.8 kB 7.6 MB/s eta 0:00:00
Collecting sacrebleu<=2.2.0 (from object-detection==0.1)
Downloading sacrebleu-2.2.0-py3-none-any.whl (116 kB)
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 116.6/116.6 kB 10.5 MB/s eta 0:00:00
Collecting portalocker (from sacrebleu<=2.2.0->object-detection==0.1)
Downloading portalocker-2.7.0-py2.py3-none-any.whl (15 kB)
Requirement already satisfied: regex in /usr/local/lib/python3.10/dist-packages (from sacrebleu<=2.2.0->object-detection==0.1) (2022.10.31)
Requirement already satisfied: tabulate>=0.8.9 in /usr/local/lib/python3.10/dist-packages (from sacrebleu<=2.2.0->object-detection==0.1) (0.9.0)
Requirement already satisfied: numpy>=1.17 in /usr/local/lib/python3.10/dist-packages (from sacrebleu<=2.2.0->object-detection==0.1) (1.24.3)
Collecting colorama (from sacrebleu<=2.2.0->object-detection==0.1)
Downloading colorama-0.4.6-py2.py3-none-any.whl (25 kB)
Requirement already satisfied: gin-config in /usr/local/lib/python3.10/dist-packages (from tf-models-official>=2.5.1->object-detection==0.1) (0.5.0)
Requirement already satisfied: google-api-python-client>=1.6.7 in /usr/local/lib/python3.10/dist-packages (from tf-models-official>=2.5.1->object-detection==0.1) (2.84.0)
Collecting immutabledict (from tf-models-official>=2.5.1->object-detection==0.1)
Downloading immutabledict-3.0.0-py3-none-any.whl (4.0 kB)
Requirement already satisfied: kaggle>=1.3.9 in /usr/local/lib/python3.10/dist-packages (from tf-models-official>=2.5.1->object-detection==0.1) (1.5.16)
Requirement already satisfied: oauth2client in /usr/local/lib/python3.10/dist-packages (from tf-models-official>=2.5.1->object-detection==0.1) (4.1.3)
Requirement already satisfied: opencv-python-headless in /usr/local/lib/python3.10/dist-packages (from tf-models-official>=2.5.1->object-detection==0.1) (4.8.0.74)
Requirement already satisfied: psutil>=5.4.3 in /usr/local/lib/python3.10/dist-packages (from tf-models-official>=2.5.1->object-detection==0.1) (5.9.5)
Requirement already satisfied: py-cpuinfo>=3.3.0 in /usr/local/lib/python3.10/dist-packages (from tf-models-official>=2.5.1->object-detection==0.1) (9.0.0)
Requirement already satisfied: pyyaml<5.4.0,>=5.1 in /usr/local/lib/python3.10/dist-packages (from tf-models-official>=2.5.1->object-detection==0.1) (5.3)
Collecting sentencepiece (from tf-models-official>=2.5.1->object-detection==0.1)
Downloading sentencepiece-0.1.99-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (1.3 MB)
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 1.3/1.3 MB 43.4 MB/s eta 0:00:00
Collecting seqeval (from tf-models-official>=2.5.1->object-detection==0.1)
Downloading seqeval-1.2.2.tar.gz (43 kB)
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 43.6/43.6 kB 3.7 MB/s eta 0:00:00
Preparing metadata (setup.py) ... done
Requirement already satisfied: tensorflow-datasets in /usr/local/lib/python3.10/dist-packages (from tf-models-official>=2.5.1->object-detection==0.1) (4.9.2)
Requirement already satisfied: tensorflow-hub>=0.6.0 in /usr/local/lib/python3.10/dist-packages (from tf-models-official>=2.5.1->object-detection==0.1) (0.14.0)
Collecting tensorflow-model-optimization>=0.4.1 (from tf-models-official>=2.5.1->object-detection==0.1)
Downloading tensorflow_model_optimization-0.7.5-py2.py3-none-any.whl (241 kB)
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 241.2/241.2 kB 11.2 MB/s eta 0:00:00
Collecting tensorflow-text~=2.13.0 (from tf-models-official>=2.5.1->object-detection==0.1)
Downloading tensorflow_text-2.13.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (6.5 MB)
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 6.5/6.5 MB 83.6 MB/s eta 0:00:00
Collecting tensorflow~=2.13.0 (from tf-models-official>=2.5.1->object-detection==0.1)
Downloading tensorflow-2.13.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (524.1 MB)
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 524.1/524.1 MB 3.2 MB/s eta 0:00:00
Requirement already satisfied: python-dateutil>=2.8.1 in /usr/local/lib/python3.10/dist-packages (from pandas->object-detection==0.1) (2.8.2)
Requirement already satisfied: pytz>=2020.1 in /usr/local/lib/python3.10/dist-packages (from pandas->object-detection==0.1) (2022.7.1)
Requirement already satisfied: absl-py>=0.2.2 in /usr/local/lib/python3.10/dist-packages (from tf-slim->object-detection==0.1) (1.4.0)
Collecting crcmod<2.0,>=1.7 (from apache-beam->object-detection==0.1)
Downloading crcmod-1.7.tar.gz (89 kB)
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 89.7/89.7 kB 9.8 MB/s eta 0:00:00
Preparing metadata (setup.py) ... done
Collecting orjson<4.0 (from apache-beam->object-detection==0.1)
Downloading orjson-3.9.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (138 kB)
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 138.7/138.7 kB 19.6 MB/s eta 0:00:00
Collecting dill<0.3.2,>=0.3.1.1 (from apache-beam->object-detection==0.1)
Downloading dill-0.3.1.1.tar.gz (151 kB)
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 152.0/152.0 kB 22.2 MB/s eta 0:00:00
Preparing metadata (setup.py) ... done
Requirement already satisfied: cloudpickle~=2.2.1 in /usr/local/lib/python3.10/dist-packages (from apache-beam->object-detection==0.1) (2.2.1)
Collecting fastavro<2,>=0.23.6 (from apache-beam->object-detection==0.1)
Downloading fastavro-1.8.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (2.7 MB)
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 2.7/2.7 MB 88.9 MB/s eta 0:00:00
Collecting fasteners<1.0,>=0.3 (from apache-beam->object-detection==0.1)
Downloading fasteners-0.18-py3-none-any.whl (18 kB)
Requirement already satisfied: grpcio!=1.48.0,<2,>=1.33.1 in /usr/local/lib/python3.10/dist-packages (from apache-beam->object-detection==0.1) (1.56.0)
Collecting hdfs<3.0.0,>=2.1.0 (from apache-beam->object-detection==0.1)
Downloading hdfs-2.7.0-py3-none-any.whl (34 kB)
Requirement already satisfied: httplib2<0.23.0,>=0.8 in /usr/local/lib/python3.10/dist-packages (from apache-beam->object-detection==0.1) (0.21.0)
Collecting objsize<0.7.0,>=0.6.1 (from apache-beam->object-detection==0.1)
Downloading objsize-0.6.1-py3-none-any.whl (9.3 kB)
Collecting pymongo<5.0.0,>=3.8.0 (from apache-beam->object-detection==0.1)
Downloading pymongo-4.4.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (603 kB)
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 603.6/603.6 kB 55.7 MB/s eta 0:00:00
Requirement already satisfied: proto-plus<2,>=1.7.1 in /usr/local/lib/python3.10/dist-packages (from apache-beam->object-detection==0.1) (1.22.3)
Requirement already satisfied: protobuf<4.24.0,>=3.20.3 in /usr/local/lib/python3.10/dist-packages (from apache-beam->object-detection==0.1) (3.20.3)
Requirement already satisfied: pydot<2,>=1.2.0 in /usr/local/lib/python3.10/dist-packages (from apache-beam->object-detection==0.1) (1.4.2)
Requirement already satisfied: requests<3.0.0,>=2.24.0 in /usr/local/lib/python3.10/dist-packages (from apache-beam->object-detection==0.1) (2.27.1)
Requirement already satisfied: typing-extensions>=3.7.0 in /usr/local/lib/python3.10/dist-packages (from apache-beam->object-detection==0.1) (4.7.1)
Collecting zstandard<1,>=0.18.0 (from apache-beam->object-detection==0.1)
Downloading zstandard-0.21.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (2.7 MB)
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 2.7/2.7 MB 103.5 MB/s eta 0:00:00
Requirement already satisfied: pyarrow<12.0.0,>=3.0.0 in /usr/local/lib/python3.10/dist-packages (from apache-beam->object-detection==0.1) (9.0.0)
Requirement already satisfied: cycler>=0.10.0 in /usr/local/lib/python3.10/dist-packages (from lvis->object-detection==0.1) (0.11.0)
Requirement already satisfied: kiwisolver>=1.1.0 in /usr/local/lib/python3.10/dist-packages (from lvis->object-detection==0.1) (1.4.4)
Requirement already satisfied: opencv-python>=4.1.0.25 in /usr/local/lib/python3.10/dist-packages (from lvis->object-detection==0.1) (4.7.0.72)
Requirement already satisfied: contourpy>=1.0.1 in /usr/local/lib/python3.10/dist-packages (from matplotlib->object-detection==0.1) (1.1.0)
Requirement already satisfied: fonttools>=4.22.0 in /usr/local/lib/python3.10/dist-packages (from matplotlib->object-detection==0.1) (4.41.0)
Requirement already satisfied: packaging>=20.0 in /usr/local/lib/python3.10/dist-packages (from matplotlib->object-detection==0.1) (23.1)
Requirement already satisfied: tensorflow-io-gcs-filesystem==0.32.0 in /usr/local/lib/python3.10/dist-packages (from tensorflow_io->object-detection==0.1) (0.32.0)
Requirement already satisfied: google-auth<3.0.0dev,>=1.19.0 in /usr/local/lib/python3.10/dist-packages (from google-api-python-client>=1.6.7->tf-models-official>=2.5.1->object-detection==0.1) (2.17.3)
Requirement already satisfied: google-auth-httplib2>=0.1.0 in /usr/local/lib/python3.10/dist-packages (from google-api-python-client>=1.6.7->tf-models-official>=2.5.1->object-detection==0.1) (0.1.0)
Requirement already satisfied: google-api-core!=2.0.*,!=2.1.*,!=2.2.*,!=2.3.0,<3.0.0dev,>=1.31.5 in /usr/local/lib/python3.10/dist-packages (from google-api-python-client>=1.6.7->tf-models-official>=2.5.1->object-detection==0.1) (2.11.1)
Requirement already satisfied: uritemplate<5,>=3.0.1 in /usr/local/lib/python3.10/dist-packages (from google-api-python-client>=1.6.7->tf-models-official>=2.5.1->object-detection==0.1) (4.1.1)
Collecting docopt (from hdfs<3.0.0,>=2.1.0->apache-beam->object-detection==0.1)
Downloading docopt-0.6.2.tar.gz (25 kB)
Preparing metadata (setup.py) ... done
Requirement already satisfied: certifi in /usr/local/lib/python3.10/dist-packages (from kaggle>=1.3.9->tf-models-official>=2.5.1->object-detection==0.1) (2023.5.7)
Requirement already satisfied: tqdm in /usr/local/lib/python3.10/dist-packages (from kaggle>=1.3.9->tf-models-official>=2.5.1->object-detection==0.1) (4.65.0)
Requirement already satisfied: python-slugify in /usr/local/lib/python3.10/dist-packages (from kaggle>=1.3.9->tf-models-official>=2.5.1->object-detection==0.1) (8.0.1)
Requirement already satisfied: urllib3 in /usr/local/lib/python3.10/dist-packages (from kaggle>=1.3.9->tf-models-official>=2.5.1->object-detection==0.1) (1.26.16)
Requirement already satisfied: bleach in /usr/local/lib/python3.10/dist-packages (from kaggle>=1.3.9->tf-models-official>=2.5.1->object-detection==0.1) (6.0.0)
Collecting dnspython<3.0.0,>=1.16.0 (from pymongo<5.0.0,>=3.8.0->apache-beam->object-detection==0.1)
Downloading dnspython-2.4.0-py3-none-any.whl (300 kB)
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 300.0/300.0 kB 33.9 MB/s eta 0:00:00
Requirement already satisfied: charset-normalizer~=2.0.0 in /usr/local/lib/python3.10/dist-packages (from requests<3.0.0,>=2.24.0->apache-beam->object-detection==0.1) (2.0.12)
Requirement already satisfied: idna<4,>=2.5 in /usr/local/lib/python3.10/dist-packages (from requests<3.0.0,>=2.24.0->apache-beam->object-detection==0.1) (3.4)
Requirement already satisfied: astunparse>=1.6.0 in /usr/local/lib/python3.10/dist-packages (from tensorflow~=2.13.0->tf-models-official>=2.5.1->object-detection==0.1) (1.6.3)
Requirement already satisfied: flatbuffers>=23.1.21 in /usr/local/lib/python3.10/dist-packages (from tensorflow~=2.13.0->tf-models-official>=2.5.1->object-detection==0.1) (23.5.26)
Requirement already satisfied: gast<=0.4.0,>=0.2.1 in /usr/local/lib/python3.10/dist-packages (from tensorflow~=2.13.0->tf-models-official>=2.5.1->object-detection==0.1) (0.4.0)
Requirement already satisfied: google-pasta>=0.1.1 in /usr/local/lib/python3.10/dist-packages (from tensorflow~=2.13.0->tf-models-official>=2.5.1->object-detection==0.1) (0.2.0)
Requirement already satisfied: h5py>=2.9.0 in /usr/local/lib/python3.10/dist-packages (from tensorflow~=2.13.0->tf-models-official>=2.5.1->object-detection==0.1) (3.8.0)
Collecting keras (from object-detection==0.1)
Downloading keras-2.13.1-py3-none-any.whl (1.7 MB)
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 1.7/1.7 MB 76.7 MB/s eta 0:00:00
Requirement already satisfied: libclang>=13.0.0 in /usr/local/lib/python3.10/dist-packages (from tensorflow~=2.13.0->tf-models-official>=2.5.1->object-detection==0.1) (16.0.6)
Requirement already satisfied: opt-einsum>=2.3.2 in /usr/local/lib/python3.10/dist-packages (from tensorflow~=2.13.0->tf-models-official>=2.5.1->object-detection==0.1) (3.3.0)
Requirement already satisfied: setuptools in /usr/local/lib/python3.10/dist-packages (from tensorflow~=2.13.0->tf-models-official>=2.5.1->object-detection==0.1) (67.7.2)
Collecting tensorboard<2.14,>=2.13 (from tensorflow~=2.13.0->tf-models-official>=2.5.1->object-detection==0.1)
Downloading tensorboard-2.13.0-py3-none-any.whl (5.6 MB)
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 5.6/5.6 MB 103.8 MB/s eta 0:00:00
Collecting tensorflow-estimator<2.14,>=2.13.0 (from tensorflow~=2.13.0->tf-models-official>=2.5.1->object-detection==0.1)
Downloading tensorflow_estimator-2.13.0-py2.py3-none-any.whl (440 kB)
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 440.8/440.8 kB 48.8 MB/s eta 0:00:00
Requirement already satisfied: termcolor>=1.1.0 in /usr/local/lib/python3.10/dist-packages (from tensorflow~=2.13.0->tf-models-official>=2.5.1->object-detection==0.1) (2.3.0)
Collecting typing-extensions>=3.7.0 (from apache-beam->object-detection==0.1)
Downloading typing_extensions-4.5.0-py3-none-any.whl (27 kB)
Requirement already satisfied: wrapt>=1.11.0 in /usr/local/lib/python3.10/dist-packages (from tensorflow~=2.13.0->tf-models-official>=2.5.1->object-detection==0.1) (1.14.1)
Requirement already satisfied: dm-tree~=0.1.1 in /usr/local/lib/python3.10/dist-packages (from tensorflow-model-optimization>=0.4.1->tf-models-official>=2.5.1->object-detection==0.1) (0.1.8)
Requirement already satisfied: pyasn1>=0.1.7 in /usr/local/lib/python3.10/dist-packages (from oauth2client->tf-models-official>=2.5.1->object-detection==0.1) (0.5.0)
Requirement already satisfied: pyasn1-modules>=0.0.5 in /usr/local/lib/python3.10/dist-packages (from oauth2client->tf-models-official>=2.5.1->object-detection==0.1) (0.3.0)
Requirement already satisfied: rsa>=3.1.4 in /usr/local/lib/python3.10/dist-packages (from oauth2client->tf-models-official>=2.5.1->object-detection==0.1) (4.9)
Requirement already satisfied: scikit-learn>=0.21.3 in /usr/local/lib/python3.10/dist-packages (from seqeval->tf-models-official>=2.5.1->object-detection==0.1) (1.2.2)
Requirement already satisfied: array-record in /usr/local/lib/python3.10/dist-packages (from tensorflow-datasets->tf-models-official>=2.5.1->object-detection==0.1) (0.4.0)
Requirement already satisfied: click in /usr/local/lib/python3.10/dist-packages (from tensorflow-datasets->tf-models-official>=2.5.1->object-detection==0.1) (8.1.6)
Requirement already satisfied: etils[enp,epath]>=0.9.0 in /usr/local/lib/python3.10/dist-packages (from tensorflow-datasets->tf-models-official>=2.5.1->object-detection==0.1) (1.3.0)
Requirement already satisfied: promise in /usr/local/lib/python3.10/dist-packages (from tensorflow-datasets->tf-models-official>=2.5.1->object-detection==0.1) (2.3)
Requirement already satisfied: tensorflow-metadata in /usr/local/lib/python3.10/dist-packages (from tensorflow-datasets->tf-models-official>=2.5.1->object-detection==0.1) (1.13.1)
Requirement already satisfied: toml in /usr/local/lib/python3.10/dist-packages (from tensorflow-datasets->tf-models-official>=2.5.1->object-detection==0.1) (0.10.2)
Requirement already satisfied: wheel<1.0,>=0.23.0 in /usr/local/lib/python3.10/dist-packages (from astunparse>=1.6.0->tensorflow~=2.13.0->tf-models-official>=2.5.1->object-detection==0.1) (0.40.0)
Collecting httpcore>=0.17.3 (from dnspython<3.0.0,>=1.16.0->pymongo<5.0.0,>=3.8.0->apache-beam->object-detection==0.1)
Downloading httpcore-0.17.3-py3-none-any.whl (74 kB)
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 74.5/74.5 kB 10.8 MB/s eta 0:00:00
Requirement already satisfied: sniffio<2.0,>=1.1 in /usr/local/lib/python3.10/dist-packages (from dnspython<3.0.0,>=1.16.0->pymongo<5.0.0,>=3.8.0->apache-beam->object-detection==0.1) (1.3.0)
Requirement already satisfied: importlib_resources in /usr/local/lib/python3.10/dist-packages (from etils[enp,epath]>=0.9.0->tensorflow-datasets->tf-models-official>=2.5.1->object-detection==0.1) (6.0.0)
Requirement already satisfied: zipp in /usr/local/lib/python3.10/dist-packages (from etils[enp,epath]>=0.9.0->tensorflow-datasets->tf-models-official>=2.5.1->object-detection==0.1) (3.16.2)
Requirement already satisfied: googleapis-common-protos<2.0.dev0,>=1.56.2 in /usr/local/lib/python3.10/dist-packages (from google-api-core!=2.0.*,!=2.1.*,!=2.2.*,!=2.3.0,<3.0.0dev,>=1.31.5->google-api-python-client>=1.6.7->tf-models-official>=2.5.1->object-detection==0.1) (1.59.1)
Requirement already satisfied: cachetools<6.0,>=2.0.0 in /usr/local/lib/python3.10/dist-packages (from google-auth<3.0.0dev,>=1.19.0->google-api-python-client>=1.6.7->tf-models-official>=2.5.1->object-detection==0.1) (5.3.1)
Requirement already satisfied: joblib>=1.1.1 in /usr/local/lib/python3.10/dist-packages (from scikit-learn>=0.21.3->seqeval->tf-models-official>=2.5.1->object-detection==0.1) (1.3.1)
Requirement already satisfied: threadpoolctl>=2.0.0 in /usr/local/lib/python3.10/dist-packages (from scikit-learn>=0.21.3->seqeval->tf-models-official>=2.5.1->object-detection==0.1) (3.2.0)
Requirement already satisfied: google-auth-oauthlib<1.1,>=0.5 in /usr/local/lib/python3.10/dist-packages (from tensorboard<2.14,>=2.13->tensorflow~=2.13.0->tf-models-official>=2.5.1->object-detection==0.1) (1.0.0)
Requirement already satisfied: markdown>=2.6.8 in /usr/local/lib/python3.10/dist-packages (from tensorboard<2.14,>=2.13->tensorflow~=2.13.0->tf-models-official>=2.5.1->object-detection==0.1) (3.4.3)
Requirement already satisfied: tensorboard-data-server<0.8.0,>=0.7.0 in /usr/local/lib/python3.10/dist-packages (from tensorboard<2.14,>=2.13->tensorflow~=2.13.0->tf-models-official>=2.5.1->object-detection==0.1) (0.7.1)
Requirement already satisfied: werkzeug>=1.0.1 in /usr/local/lib/python3.10/dist-packages (from tensorboard<2.14,>=2.13->tensorflow~=2.13.0->tf-models-official>=2.5.1->object-detection==0.1) (2.3.6)
Requirement already satisfied: webencodings in /usr/local/lib/python3.10/dist-packages (from bleach->kaggle>=1.3.9->tf-models-official>=2.5.1->object-detection==0.1) (0.5.1)
Requirement already satisfied: text-unidecode>=1.3 in /usr/local/lib/python3.10/dist-packages (from python-slugify->kaggle>=1.3.9->tf-models-official>=2.5.1->object-detection==0.1) (1.3)
Requirement already satisfied: requests-oauthlib>=0.7.0 in /usr/local/lib/python3.10/dist-packages (from google-auth-oauthlib<1.1,>=0.5->tensorboard<2.14,>=2.13->tensorflow~=2.13.0->tf-models-official>=2.5.1->object-detection==0.1) (1.3.1)
Collecting h11<0.15,>=0.13 (from httpcore>=0.17.3->dnspython<3.0.0,>=1.16.0->pymongo<5.0.0,>=3.8.0->apache-beam->object-detection==0.1)
Downloading h11-0.14.0-py3-none-any.whl (58 kB)
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 58.3/58.3 kB 8.6 MB/s eta 0:00:00
Requirement already satisfied: anyio<5.0,>=3.0 in /usr/local/lib/python3.10/dist-packages (from httpcore>=0.17.3->dnspython<3.0.0,>=1.16.0->pymongo<5.0.0,>=3.8.0->apache-beam->object-detection==0.1) (3.7.1)
Requirement already satisfied: MarkupSafe>=2.1.1 in /usr/local/lib/python3.10/dist-packages (from werkzeug>=1.0.1->tensorboard<2.14,>=2.13->tensorflow~=2.13.0->tf-models-official>=2.5.1->object-detection==0.1) (2.1.3)
Requirement already satisfied: exceptiongroup in /usr/local/lib/python3.10/dist-packages (from anyio<5.0,>=3.0->httpcore>=0.17.3->dnspython<3.0.0,>=1.16.0->pymongo<5.0.0,>=3.8.0->apache-beam->object-detection==0.1) (1.1.2)
Requirement already satisfied: oauthlib>=3.0.0 in /usr/local/lib/python3.10/dist-packages (from requests-oauthlib>=0.7.0->google-auth-oauthlib<1.1,>=0.5->tensorboard<2.14,>=2.13->tensorflow~=2.13.0->tf-models-official>=2.5.1->object-detection==0.1) (3.2.2)
Building wheels for collected packages: object-detection, avro-python3, crcmod, dill, seqeval, docopt
Building wheel for object-detection (setup.py) ... done
Created wheel for object-detection: filename=object_detection-0.1-py3-none-any.whl size=1697202 sha256=b6e79521b46d226100459697c216cefc719934eaa9a9aaefb37cd30f8186256e
Stored in directory: /tmp/pip-ephem-wheel-cache-k9c3ri1o/wheels/fb/c9/43/709f88e66b36649c7a29812ca4f6236f31caed949aabc3e335
Building wheel for avro-python3 (setup.py) ... done
Created wheel for avro-python3: filename=avro_python3-1.10.2-py3-none-any.whl size=43994 sha256=8e646ec9b0ee7e76af10b8f4e87c33ecca4f798002bdc7f3e8bc82b9bbecf3e4
Stored in directory: /root/.cache/pip/wheels/bc/85/62/6cdd81c56f923946b401cecff38055b94c9b766927f7d8ca82
Building wheel for crcmod (setup.py) ... done
Created wheel for crcmod: filename=crcmod-1.7-cp310-cp310-linux_x86_64.whl size=31410 sha256=99977dadd22b83aff8fb3f910e1b94c8dbc1962e1320336bc470a18a95960b77
Stored in directory: /root/.cache/pip/wheels/85/4c/07/72215c529bd59d67e3dac29711d7aba1b692f543c808ba9e86
Building wheel for dill (setup.py) ... done
Created wheel for dill: filename=dill-0.3.1.1-py3-none-any.whl size=78545 sha256=7bf178c253ba17710cd1dee767368f85c7e8a140ee7b1e1c243c3e8f8cd879e4
Stored in directory: /root/.cache/pip/wheels/ea/e2/86/64980d90e297e7bf2ce588c2b96e818f5399c515c4bb8a7e4f
Building wheel for seqeval (setup.py) ... done
Created wheel for seqeval: filename=seqeval-1.2.2-py3-none-any.whl size=16165 sha256=286c2e325597e0f16e3ae7b7f09a2d8987314b80626ebf6c8ceb5cf5a66a0c28
Stored in directory: /root/.cache/pip/wheels/1a/67/4a/ad4082dd7dfc30f2abfe4d80a2ed5926a506eb8a972b4767fa
Building wheel for docopt (setup.py) ... done
Created wheel for docopt: filename=docopt-0.6.2-py2.py3-none-any.whl size=13707 sha256=7e05e464ce2a9dedc4181eedad5da3882e26a97f165d01aaf4124404c464825d
Stored in directory: /root/.cache/pip/wheels/fc/ab/d4/5da2067ac95b36618c629a5f93f809425700506f72c9732fac
Successfully built object-detection avro-python3 crcmod dill seqeval docopt
Installing collected packages: sentencepiece, docopt, crcmod, zstandard, typing-extensions, tensorflow-model-optimization, tensorflow_io, tensorflow-estimator, pyparsing, portalocker, orjson, objsize, keras, immutabledict, h11, fasteners, fastavro, dill, colorama, avro-python3, sacrebleu, httpcore, hdfs, seqeval, lvis, dnspython, tensorboard, pymongo, tensorflow, apache-beam, tensorflow-text, tf-models-official, object-detection
Attempting uninstall: typing-extensions
Found existing installation: typing_extensions 4.7.1
Uninstalling typing_extensions-4.7.1:
Successfully uninstalled typing_extensions-4.7.1
Attempting uninstall: tensorflow-estimator
Found existing installation: tensorflow-estimator 2.12.0
Uninstalling tensorflow-estimator-2.12.0:
Successfully uninstalled tensorflow-estimator-2.12.0
Attempting uninstall: pyparsing
Found existing installation: pyparsing 3.1.0
Uninstalling pyparsing-3.1.0:
Successfully uninstalled pyparsing-3.1.0
Attempting uninstall: keras
Found existing installation: keras 2.12.0
Uninstalling keras-2.12.0:
Successfully uninstalled keras-2.12.0
Attempting uninstall: tensorboard
Found existing installation: tensorboard 2.12.3
Uninstalling tensorboard-2.12.3:
Successfully uninstalled tensorboard-2.12.3
Attempting uninstall: tensorflow
Found existing installation: tensorflow 2.12.0
Uninstalling tensorflow-2.12.0:
Successfully uninstalled tensorflow-2.12.0
ERROR: pip's dependency resolver does not currently take into account all the packages that are installed. This behaviour is the source of the following dependency conflicts.
flax 0.7.0 requires PyYAML>=5.4.1, but you have pyyaml 5.3 which is incompatible.
Successfully installed apache-beam-2.49.0 avro-python3-1.10.2 colorama-0.4.6 crcmod-1.7 dill-0.3.1.1 dnspython-2.4.0 docopt-0.6.2 fastavro-1.8.2 fasteners-0.18 h11-0.14.0 hdfs-2.7.0 httpcore-0.17.3 immutabledict-3.0.0 keras-2.13.1 lvis-0.5.3 object-detection-0.1 objsize-0.6.1 orjson-3.9.2 portalocker-2.7.0 pymongo-4.4.1 pyparsing-2.4.7 sacrebleu-2.2.0 sentencepiece-0.1.99 seqeval-1.2.2 tensorboard-2.13.0 tensorflow-2.13.0 tensorflow-estimator-2.13.0 tensorflow-model-optimization-0.7.5 tensorflow-text-2.13.0 tensorflow_io-0.32.0 tf-models-official-2.13.1 typing-extensions-4.5.0 zstandard-0.21.0
# Test if object detection package has been successfully installed
# Runs the API's own model-builder test suite; all tests passing confirms
# the install (protobuf compilation + pip install) worked end to end.
!python object_detection/builders/model_builder_tf2_test.py
2023-07-23 20:34:01.517004: I tensorflow/core/platform/cpu_feature_guard.cc:182] This TensorFlow binary is optimized to use available CPU instructions in performance-critical operations.
To enable the following instructions: AVX2 FMA, in other operations, rebuild TensorFlow with the appropriate compiler flags.
2023-07-23 20:34:04.562247: W tensorflow/compiler/tf2tensorrt/utils/py_utils.cc:38] TF-TRT Warning: Could not find TensorRT
/usr/local/lib/python3.10/dist-packages/tensorflow_io/python/ops/__init__.py:98: UserWarning: unable to load libtensorflow_io_plugins.so: unable to open file: libtensorflow_io_plugins.so, from paths: ['/usr/local/lib/python3.10/dist-packages/tensorflow_io/python/ops/libtensorflow_io_plugins.so']
caused by: ['/usr/local/lib/python3.10/dist-packages/tensorflow_io/python/ops/libtensorflow_io_plugins.so: undefined symbol: _ZN3tsl6Status12empty_stringB5cxx11Ev']
warnings.warn(f"unable to load libtensorflow_io_plugins.so: {e}")
/usr/local/lib/python3.10/dist-packages/tensorflow_io/python/ops/__init__.py:104: UserWarning: file system plugins are not loaded: unable to open file: libtensorflow_io.so, from paths: ['/usr/local/lib/python3.10/dist-packages/tensorflow_io/python/ops/libtensorflow_io.so']
caused by: ['/usr/local/lib/python3.10/dist-packages/tensorflow_io/python/ops/libtensorflow_io.so: undefined symbol: _ZNK10tensorflow4data11DatasetBase8FinalizeEPNS_15OpKernelContextESt8functionIFN3tsl8StatusOrISt10unique_ptrIS1_NS5_4core15RefCountDeleterEEEEvEE']
warnings.warn(f"file system plugins are not loaded: {e}")
2023-07-23 20:34:13.651546: I tensorflow/compiler/xla/stream_executor/cuda/cuda_gpu_executor.cc:995] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355
2023-07-23 20:34:14.356004: I tensorflow/compiler/xla/stream_executor/cuda/cuda_gpu_executor.cc:995] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355
2023-07-23 20:34:14.356410: I tensorflow/compiler/xla/stream_executor/cuda/cuda_gpu_executor.cc:995] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355
Running tests under Python 3.10.6: /usr/bin/python3
[ RUN ] ModelBuilderTF2Test.test_create_center_net_deepmac
2023-07-23 20:34:14.393482: I tensorflow/compiler/xla/stream_executor/cuda/cuda_gpu_executor.cc:995] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355
2023-07-23 20:34:14.393928: I tensorflow/compiler/xla/stream_executor/cuda/cuda_gpu_executor.cc:995] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355
2023-07-23 20:34:14.400725: I tensorflow/compiler/xla/stream_executor/cuda/cuda_gpu_executor.cc:995] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355
2023-07-23 20:34:17.570345: I tensorflow/compiler/xla/stream_executor/cuda/cuda_gpu_executor.cc:995] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355
2023-07-23 20:34:17.570825: I tensorflow/compiler/xla/stream_executor/cuda/cuda_gpu_executor.cc:995] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355
2023-07-23 20:34:17.571102: I tensorflow/compiler/xla/stream_executor/cuda/cuda_gpu_executor.cc:995] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355
2023-07-23 20:34:17.571414: W tensorflow/core/common_runtime/gpu/gpu_bfc_allocator.cc:47] Overriding orig_value setting because the TF_FORCE_GPU_ALLOW_GROWTH environment variable is set. Original config value was 0.
2023-07-23 20:34:17.571463: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1639] Created device /job:localhost/replica:0/task:0/device:GPU:0 with 13692 MB memory: -> device: 0, name: Tesla T4, pci bus id: 0000:00:04.0, compute capability: 7.5
WARNING:tensorflow:`tf.keras.layers.experimental.SyncBatchNormalization` endpoint is deprecated and will be removed in a future release. Please use `tf.keras.layers.BatchNormalization` with parameter `synchronized` set to True.
W0723 20:34:17.636212 134204322144256 batch_normalization.py:1531] `tf.keras.layers.experimental.SyncBatchNormalization` endpoint is deprecated and will be removed in a future release. Please use `tf.keras.layers.BatchNormalization` with parameter `synchronized` set to True.
W0723 20:34:19.543061 134204322144256 model_builder.py:1112] Building experimental DeepMAC meta-arch. Some features may be omitted.
INFO:tensorflow:time(__main__.ModelBuilderTF2Test.test_create_center_net_deepmac): 6.46s
I0723 20:34:20.823199 134204322144256 test_util.py:2462] time(__main__.ModelBuilderTF2Test.test_create_center_net_deepmac): 6.46s
[ OK ] ModelBuilderTF2Test.test_create_center_net_deepmac
[ RUN ] ModelBuilderTF2Test.test_create_center_net_model0 (customize_head_params=True)
INFO:tensorflow:time(__main__.ModelBuilderTF2Test.test_create_center_net_model0 (customize_head_params=True)): 2.35s
I0723 20:34:23.174165 134204322144256 test_util.py:2462] time(__main__.ModelBuilderTF2Test.test_create_center_net_model0 (customize_head_params=True)): 2.35s
[ OK ] ModelBuilderTF2Test.test_create_center_net_model0 (customize_head_params=True)
[ RUN ] ModelBuilderTF2Test.test_create_center_net_model1 (customize_head_params=False)
INFO:tensorflow:time(__main__.ModelBuilderTF2Test.test_create_center_net_model1 (customize_head_params=False)): 2.09s
I0723 20:34:25.265520 134204322144256 test_util.py:2462] time(__main__.ModelBuilderTF2Test.test_create_center_net_model1 (customize_head_params=False)): 2.09s
[ OK ] ModelBuilderTF2Test.test_create_center_net_model1 (customize_head_params=False)
[ RUN ] ModelBuilderTF2Test.test_create_center_net_model_from_keypoints
INFO:tensorflow:time(__main__.ModelBuilderTF2Test.test_create_center_net_model_from_keypoints): 1.21s
I0723 20:34:26.483785 134204322144256 test_util.py:2462] time(__main__.ModelBuilderTF2Test.test_create_center_net_model_from_keypoints): 1.21s
[ OK ] ModelBuilderTF2Test.test_create_center_net_model_from_keypoints
[ RUN ] ModelBuilderTF2Test.test_create_center_net_model_mobilenet
INFO:tensorflow:time(__main__.ModelBuilderTF2Test.test_create_center_net_model_mobilenet): 3.77s
I0723 20:34:30.249958 134204322144256 test_util.py:2462] time(__main__.ModelBuilderTF2Test.test_create_center_net_model_mobilenet): 3.77s
[ OK ] ModelBuilderTF2Test.test_create_center_net_model_mobilenet
[ RUN ] ModelBuilderTF2Test.test_create_experimental_model
INFO:tensorflow:time(__main__.ModelBuilderTF2Test.test_create_experimental_model): 0.0s
I0723 20:34:30.259367 134204322144256 test_util.py:2462] time(__main__.ModelBuilderTF2Test.test_create_experimental_model): 0.0s
[ OK ] ModelBuilderTF2Test.test_create_experimental_model
[ RUN ] ModelBuilderTF2Test.test_create_faster_rcnn_from_config_with_crop_feature0 (True)
INFO:tensorflow:time(__main__.ModelBuilderTF2Test.test_create_faster_rcnn_from_config_with_crop_feature0 (True)): 0.04s
I0723 20:34:30.304257 134204322144256 test_util.py:2462] time(__main__.ModelBuilderTF2Test.test_create_faster_rcnn_from_config_with_crop_feature0 (True)): 0.04s
[ OK ] ModelBuilderTF2Test.test_create_faster_rcnn_from_config_with_crop_feature0 (True)
[ RUN ] ModelBuilderTF2Test.test_create_faster_rcnn_from_config_with_crop_feature1 (False)
INFO:tensorflow:time(__main__.ModelBuilderTF2Test.test_create_faster_rcnn_from_config_with_crop_feature1 (False)): 0.03s
I0723 20:34:30.334177 134204322144256 test_util.py:2462] time(__main__.ModelBuilderTF2Test.test_create_faster_rcnn_from_config_with_crop_feature1 (False)): 0.03s
[ OK ] ModelBuilderTF2Test.test_create_faster_rcnn_from_config_with_crop_feature1 (False)
[ RUN ] ModelBuilderTF2Test.test_create_faster_rcnn_model_from_config_with_example_miner
INFO:tensorflow:time(__main__.ModelBuilderTF2Test.test_create_faster_rcnn_model_from_config_with_example_miner): 0.03s
I0723 20:34:30.364976 134204322144256 test_util.py:2462] time(__main__.ModelBuilderTF2Test.test_create_faster_rcnn_model_from_config_with_example_miner): 0.03s
[ OK ] ModelBuilderTF2Test.test_create_faster_rcnn_model_from_config_with_example_miner
[ RUN ] ModelBuilderTF2Test.test_create_faster_rcnn_models_from_config_faster_rcnn_with_matmul
INFO:tensorflow:time(__main__.ModelBuilderTF2Test.test_create_faster_rcnn_models_from_config_faster_rcnn_with_matmul): 0.18s
I0723 20:34:30.550214 134204322144256 test_util.py:2462] time(__main__.ModelBuilderTF2Test.test_create_faster_rcnn_models_from_config_faster_rcnn_with_matmul): 0.18s
[ OK ] ModelBuilderTF2Test.test_create_faster_rcnn_models_from_config_faster_rcnn_with_matmul
[ RUN ] ModelBuilderTF2Test.test_create_faster_rcnn_models_from_config_faster_rcnn_without_matmul
INFO:tensorflow:time(__main__.ModelBuilderTF2Test.test_create_faster_rcnn_models_from_config_faster_rcnn_without_matmul): 0.18s
I0723 20:34:30.730468 134204322144256 test_util.py:2462] time(__main__.ModelBuilderTF2Test.test_create_faster_rcnn_models_from_config_faster_rcnn_without_matmul): 0.18s
[ OK ] ModelBuilderTF2Test.test_create_faster_rcnn_models_from_config_faster_rcnn_without_matmul
[ RUN ] ModelBuilderTF2Test.test_create_faster_rcnn_models_from_config_mask_rcnn_with_matmul
INFO:tensorflow:time(__main__.ModelBuilderTF2Test.test_create_faster_rcnn_models_from_config_mask_rcnn_with_matmul): 0.19s
I0723 20:34:30.918853 134204322144256 test_util.py:2462] time(__main__.ModelBuilderTF2Test.test_create_faster_rcnn_models_from_config_mask_rcnn_with_matmul): 0.19s
[ OK ] ModelBuilderTF2Test.test_create_faster_rcnn_models_from_config_mask_rcnn_with_matmul
[ RUN ] ModelBuilderTF2Test.test_create_faster_rcnn_models_from_config_mask_rcnn_without_matmul
INFO:tensorflow:time(__main__.ModelBuilderTF2Test.test_create_faster_rcnn_models_from_config_mask_rcnn_without_matmul): 0.18s
I0723 20:34:31.099937 134204322144256 test_util.py:2462] time(__main__.ModelBuilderTF2Test.test_create_faster_rcnn_models_from_config_mask_rcnn_without_matmul): 0.18s
[ OK ] ModelBuilderTF2Test.test_create_faster_rcnn_models_from_config_mask_rcnn_without_matmul
[ RUN ] ModelBuilderTF2Test.test_create_rfcn_model_from_config
INFO:tensorflow:time(__main__.ModelBuilderTF2Test.test_create_rfcn_model_from_config): 0.17s
I0723 20:34:31.274461 134204322144256 test_util.py:2462] time(__main__.ModelBuilderTF2Test.test_create_rfcn_model_from_config): 0.17s
[ OK ] ModelBuilderTF2Test.test_create_rfcn_model_from_config
[ RUN ] ModelBuilderTF2Test.test_create_ssd_fpn_model_from_config
INFO:tensorflow:time(__main__.ModelBuilderTF2Test.test_create_ssd_fpn_model_from_config): 0.05s
I0723 20:34:31.327493 134204322144256 test_util.py:2462] time(__main__.ModelBuilderTF2Test.test_create_ssd_fpn_model_from_config): 0.05s
[ OK ] ModelBuilderTF2Test.test_create_ssd_fpn_model_from_config
[ RUN ] ModelBuilderTF2Test.test_create_ssd_models_from_config
I0723 20:34:31.658299 134204322144256 ssd_efficientnet_bifpn_feature_extractor.py:150] EfficientDet EfficientNet backbone version: efficientnet-b0
I0723 20:34:31.658518 134204322144256 ssd_efficientnet_bifpn_feature_extractor.py:152] EfficientDet BiFPN num filters: 64
I0723 20:34:31.658604 134204322144256 ssd_efficientnet_bifpn_feature_extractor.py:153] EfficientDet BiFPN num iterations: 3
I0723 20:34:31.662760 134204322144256 efficientnet_model.py:143] round_filter input=32 output=32
I0723 20:34:31.710141 134204322144256 efficientnet_model.py:143] round_filter input=32 output=32
I0723 20:34:31.710280 134204322144256 efficientnet_model.py:143] round_filter input=16 output=16
I0723 20:34:31.827352 134204322144256 efficientnet_model.py:143] round_filter input=16 output=16
I0723 20:34:31.827696 134204322144256 efficientnet_model.py:143] round_filter input=24 output=24
I0723 20:34:32.134507 134204322144256 efficientnet_model.py:143] round_filter input=24 output=24
I0723 20:34:32.134690 134204322144256 efficientnet_model.py:143] round_filter input=40 output=40
I0723 20:34:32.439123 134204322144256 efficientnet_model.py:143] round_filter input=40 output=40
I0723 20:34:32.439330 134204322144256 efficientnet_model.py:143] round_filter input=80 output=80
I0723 20:34:32.878896 134204322144256 efficientnet_model.py:143] round_filter input=80 output=80
I0723 20:34:32.879101 134204322144256 efficientnet_model.py:143] round_filter input=112 output=112
I0723 20:34:33.324103 134204322144256 efficientnet_model.py:143] round_filter input=112 output=112
I0723 20:34:33.324326 134204322144256 efficientnet_model.py:143] round_filter input=192 output=192
I0723 20:34:33.897471 134204322144256 efficientnet_model.py:143] round_filter input=192 output=192
I0723 20:34:33.897680 134204322144256 efficientnet_model.py:143] round_filter input=320 output=320
I0723 20:34:34.034998 134204322144256 efficientnet_model.py:143] round_filter input=1280 output=1280
I0723 20:34:34.103658 134204322144256 efficientnet_model.py:453] Building model efficientnet with params ModelConfig(width_coefficient=1.0, depth_coefficient=1.0, resolution=224, dropout_rate=0.2, blocks=(BlockConfig(input_filters=32, output_filters=16, kernel_size=3, num_repeat=1, expand_ratio=1, strides=(1, 1), se_ratio=0.25, id_skip=True, fused_conv=False, conv_type='depthwise'), BlockConfig(input_filters=16, output_filters=24, kernel_size=3, num_repeat=2, expand_ratio=6, strides=(2, 2), se_ratio=0.25, id_skip=True, fused_conv=False, conv_type='depthwise'), BlockConfig(input_filters=24, output_filters=40, kernel_size=5, num_repeat=2, expand_ratio=6, strides=(2, 2), se_ratio=0.25, id_skip=True, fused_conv=False, conv_type='depthwise'), BlockConfig(input_filters=40, output_filters=80, kernel_size=3, num_repeat=3, expand_ratio=6, strides=(2, 2), se_ratio=0.25, id_skip=True, fused_conv=False, conv_type='depthwise'), BlockConfig(input_filters=80, output_filters=112, kernel_size=5, num_repeat=3, expand_ratio=6, strides=(1, 1), se_ratio=0.25, id_skip=True, fused_conv=False, conv_type='depthwise'), BlockConfig(input_filters=112, output_filters=192, kernel_size=5, num_repeat=4, expand_ratio=6, strides=(2, 2), se_ratio=0.25, id_skip=True, fused_conv=False, conv_type='depthwise'), BlockConfig(input_filters=192, output_filters=320, kernel_size=3, num_repeat=1, expand_ratio=6, strides=(1, 1), se_ratio=0.25, id_skip=True, fused_conv=False, conv_type='depthwise')), stem_base_filters=32, top_base_filters=1280, activation='simple_swish', batch_norm='default', bn_momentum=0.99, bn_epsilon=0.001, weight_decay=5e-06, drop_connect_rate=0.2, depth_divisor=8, min_depth=None, use_se=True, input_channels=3, num_classes=1000, model_name='efficientnet', rescale_input=False, data_format='channels_last', dtype='float32')
I0723 20:34:34.202666 134204322144256 ssd_efficientnet_bifpn_feature_extractor.py:150] EfficientDet EfficientNet backbone version: efficientnet-b1
I0723 20:34:34.202854 134204322144256 ssd_efficientnet_bifpn_feature_extractor.py:152] EfficientDet BiFPN num filters: 88
I0723 20:34:34.202937 134204322144256 ssd_efficientnet_bifpn_feature_extractor.py:153] EfficientDet BiFPN num iterations: 4
I0723 20:34:34.205981 134204322144256 efficientnet_model.py:143] round_filter input=32 output=32
I0723 20:34:34.234901 134204322144256 efficientnet_model.py:143] round_filter input=32 output=32
I0723 20:34:34.235054 134204322144256 efficientnet_model.py:143] round_filter input=16 output=16
I0723 20:34:34.467478 134204322144256 efficientnet_model.py:143] round_filter input=16 output=16
I0723 20:34:34.467666 134204322144256 efficientnet_model.py:143] round_filter input=24 output=24
I0723 20:34:34.900458 134204322144256 efficientnet_model.py:143] round_filter input=24 output=24
I0723 20:34:34.900649 134204322144256 efficientnet_model.py:143] round_filter input=40 output=40
I0723 20:34:35.600800 134204322144256 efficientnet_model.py:143] round_filter input=40 output=40
I0723 20:34:35.600993 134204322144256 efficientnet_model.py:143] round_filter input=80 output=80
I0723 20:34:36.162153 134204322144256 efficientnet_model.py:143] round_filter input=80 output=80
I0723 20:34:36.162388 134204322144256 efficientnet_model.py:143] round_filter input=112 output=112
I0723 20:34:36.663292 134204322144256 efficientnet_model.py:143] round_filter input=112 output=112
I0723 20:34:36.663468 134204322144256 efficientnet_model.py:143] round_filter input=192 output=192
I0723 20:34:37.121645 134204322144256 efficientnet_model.py:143] round_filter input=192 output=192
I0723 20:34:37.121803 134204322144256 efficientnet_model.py:143] round_filter input=320 output=320
I0723 20:34:37.308829 134204322144256 efficientnet_model.py:143] round_filter input=1280 output=1280
I0723 20:34:37.343785 134204322144256 efficientnet_model.py:453] Building model efficientnet with params ModelConfig(width_coefficient=1.0, depth_coefficient=1.1, resolution=240, dropout_rate=0.2, blocks=(BlockConfig(input_filters=32, output_filters=16, kernel_size=3, num_repeat=1, expand_ratio=1, strides=(1, 1), se_ratio=0.25, id_skip=True, fused_conv=False, conv_type='depthwise'), BlockConfig(input_filters=16, output_filters=24, kernel_size=3, num_repeat=2, expand_ratio=6, strides=(2, 2), se_ratio=0.25, id_skip=True, fused_conv=False, conv_type='depthwise'), BlockConfig(input_filters=24, output_filters=40, kernel_size=5, num_repeat=2, expand_ratio=6, strides=(2, 2), se_ratio=0.25, id_skip=True, fused_conv=False, conv_type='depthwise'), BlockConfig(input_filters=40, output_filters=80, kernel_size=3, num_repeat=3, expand_ratio=6, strides=(2, 2), se_ratio=0.25, id_skip=True, fused_conv=False, conv_type='depthwise'), BlockConfig(input_filters=80, output_filters=112, kernel_size=5, num_repeat=3, expand_ratio=6, strides=(1, 1), se_ratio=0.25, id_skip=True, fused_conv=False, conv_type='depthwise'), BlockConfig(input_filters=112, output_filters=192, kernel_size=5, num_repeat=4, expand_ratio=6, strides=(2, 2), se_ratio=0.25, id_skip=True, fused_conv=False, conv_type='depthwise'), BlockConfig(input_filters=192, output_filters=320, kernel_size=3, num_repeat=1, expand_ratio=6, strides=(1, 1), se_ratio=0.25, id_skip=True, fused_conv=False, conv_type='depthwise')), stem_base_filters=32, top_base_filters=1280, activation='simple_swish', batch_norm='default', bn_momentum=0.99, bn_epsilon=0.001, weight_decay=5e-06, drop_connect_rate=0.2, depth_divisor=8, min_depth=None, use_se=True, input_channels=3, num_classes=1000, model_name='efficientnet', rescale_input=False, data_format='channels_last', dtype='float32')
I0723 20:34:37.413658 134204322144256 ssd_efficientnet_bifpn_feature_extractor.py:150] EfficientDet EfficientNet backbone version: efficientnet-b2
I0723 20:34:37.413905 134204322144256 ssd_efficientnet_bifpn_feature_extractor.py:152] EfficientDet BiFPN num filters: 112
I0723 20:34:37.414020 134204322144256 ssd_efficientnet_bifpn_feature_extractor.py:153] EfficientDet BiFPN num iterations: 5
I0723 20:34:37.416132 134204322144256 efficientnet_model.py:143] round_filter input=32 output=32
I0723 20:34:37.434121 134204322144256 efficientnet_model.py:143] round_filter input=32 output=32
I0723 20:34:37.434228 134204322144256 efficientnet_model.py:143] round_filter input=16 output=16
I0723 20:34:37.572172 134204322144256 efficientnet_model.py:143] round_filter input=16 output=16
I0723 20:34:37.572292 134204322144256 efficientnet_model.py:143] round_filter input=24 output=24
I0723 20:34:37.842743 134204322144256 efficientnet_model.py:143] round_filter input=24 output=24
I0723 20:34:37.842898 134204322144256 efficientnet_model.py:143] round_filter input=40 output=48
I0723 20:34:38.120220 134204322144256 efficientnet_model.py:143] round_filter input=40 output=48
I0723 20:34:38.120415 134204322144256 efficientnet_model.py:143] round_filter input=80 output=88
I0723 20:34:38.523064 134204322144256 efficientnet_model.py:143] round_filter input=80 output=88
I0723 20:34:38.523231 134204322144256 efficientnet_model.py:143] round_filter input=112 output=120
I0723 20:34:38.910779 134204322144256 efficientnet_model.py:143] round_filter input=112 output=120
I0723 20:34:38.910934 134204322144256 efficientnet_model.py:143] round_filter input=192 output=208
I0723 20:34:39.398663 134204322144256 efficientnet_model.py:143] round_filter input=192 output=208
I0723 20:34:39.399083 134204322144256 efficientnet_model.py:143] round_filter input=320 output=352
I0723 20:34:39.586705 134204322144256 efficientnet_model.py:143] round_filter input=1280 output=1408
I0723 20:34:39.627840 134204322144256 efficientnet_model.py:453] Building model efficientnet with params ModelConfig(width_coefficient=1.1, depth_coefficient=1.2, resolution=260, dropout_rate=0.3, blocks=(BlockConfig(input_filters=32, output_filters=16, kernel_size=3, num_repeat=1, expand_ratio=1, strides=(1, 1), se_ratio=0.25, id_skip=True, fused_conv=False, conv_type='depthwise'), BlockConfig(input_filters=16, output_filters=24, kernel_size=3, num_repeat=2, expand_ratio=6, strides=(2, 2), se_ratio=0.25, id_skip=True, fused_conv=False, conv_type='depthwise'), BlockConfig(input_filters=24, output_filters=40, kernel_size=5, num_repeat=2, expand_ratio=6, strides=(2, 2), se_ratio=0.25, id_skip=True, fused_conv=False, conv_type='depthwise'), BlockConfig(input_filters=40, output_filters=80, kernel_size=3, num_repeat=3, expand_ratio=6, strides=(2, 2), se_ratio=0.25, id_skip=True, fused_conv=False, conv_type='depthwise'), BlockConfig(input_filters=80, output_filters=112, kernel_size=5, num_repeat=3, expand_ratio=6, strides=(1, 1), se_ratio=0.25, id_skip=True, fused_conv=False, conv_type='depthwise'), BlockConfig(input_filters=112, output_filters=192, kernel_size=5, num_repeat=4, expand_ratio=6, strides=(2, 2), se_ratio=0.25, id_skip=True, fused_conv=False, conv_type='depthwise'), BlockConfig(input_filters=192, output_filters=320, kernel_size=3, num_repeat=1, expand_ratio=6, strides=(1, 1), se_ratio=0.25, id_skip=True, fused_conv=False, conv_type='depthwise')), stem_base_filters=32, top_base_filters=1280, activation='simple_swish', batch_norm='default', bn_momentum=0.99, bn_epsilon=0.001, weight_decay=5e-06, drop_connect_rate=0.2, depth_divisor=8, min_depth=None, use_se=True, input_channels=3, num_classes=1000, model_name='efficientnet', rescale_input=False, data_format='channels_last', dtype='float32')
I0723 20:34:39.692908 134204322144256 ssd_efficientnet_bifpn_feature_extractor.py:150] EfficientDet EfficientNet backbone version: efficientnet-b3
I0723 20:34:39.693046 134204322144256 ssd_efficientnet_bifpn_feature_extractor.py:152] EfficientDet BiFPN num filters: 160
I0723 20:34:39.693119 134204322144256 ssd_efficientnet_bifpn_feature_extractor.py:153] EfficientDet BiFPN num iterations: 6
I0723 20:34:39.695039 134204322144256 efficientnet_model.py:143] round_filter input=32 output=40
I0723 20:34:39.716570 134204322144256 efficientnet_model.py:143] round_filter input=32 output=40
I0723 20:34:39.716707 134204322144256 efficientnet_model.py:143] round_filter input=16 output=24
I0723 20:34:39.873191 134204322144256 efficientnet_model.py:143] round_filter input=16 output=24
I0723 20:34:39.873349 134204322144256 efficientnet_model.py:143] round_filter input=24 output=32
I0723 20:34:40.151925 134204322144256 efficientnet_model.py:143] round_filter input=24 output=32
I0723 20:34:40.152084 134204322144256 efficientnet_model.py:143] round_filter input=40 output=48
I0723 20:34:40.433294 134204322144256 efficientnet_model.py:143] round_filter input=40 output=48
I0723 20:34:40.433466 134204322144256 efficientnet_model.py:143] round_filter input=80 output=96
I0723 20:34:40.912874 134204322144256 efficientnet_model.py:143] round_filter input=80 output=96
I0723 20:34:40.913071 134204322144256 efficientnet_model.py:143] round_filter input=112 output=136
I0723 20:34:41.404938 134204322144256 efficientnet_model.py:143] round_filter input=112 output=136
I0723 20:34:41.405106 134204322144256 efficientnet_model.py:143] round_filter input=192 output=232
I0723 20:34:41.960454 134204322144256 efficientnet_model.py:143] round_filter input=192 output=232
I0723 20:34:41.960607 134204322144256 efficientnet_model.py:143] round_filter input=320 output=384
I0723 20:34:42.145941 134204322144256 efficientnet_model.py:143] round_filter input=1280 output=1536
I0723 20:34:42.186351 134204322144256 efficientnet_model.py:453] Building model efficientnet with params ModelConfig(width_coefficient=1.2, depth_coefficient=1.4, resolution=300, dropout_rate=0.3, blocks=(BlockConfig(input_filters=32, output_filters=16, kernel_size=3, num_repeat=1, expand_ratio=1, strides=(1, 1), se_ratio=0.25, id_skip=True, fused_conv=False, conv_type='depthwise'), BlockConfig(input_filters=16, output_filters=24, kernel_size=3, num_repeat=2, expand_ratio=6, strides=(2, 2), se_ratio=0.25, id_skip=True, fused_conv=False, conv_type='depthwise'), BlockConfig(input_filters=24, output_filters=40, kernel_size=5, num_repeat=2, expand_ratio=6, strides=(2, 2), se_ratio=0.25, id_skip=True, fused_conv=False, conv_type='depthwise'), BlockConfig(input_filters=40, output_filters=80, kernel_size=3, num_repeat=3, expand_ratio=6, strides=(2, 2), se_ratio=0.25, id_skip=True, fused_conv=False, conv_type='depthwise'), BlockConfig(input_filters=80, output_filters=112, kernel_size=5, num_repeat=3, expand_ratio=6, strides=(1, 1), se_ratio=0.25, id_skip=True, fused_conv=False, conv_type='depthwise'), BlockConfig(input_filters=112, output_filters=192, kernel_size=5, num_repeat=4, expand_ratio=6, strides=(2, 2), se_ratio=0.25, id_skip=True, fused_conv=False, conv_type='depthwise'), BlockConfig(input_filters=192, output_filters=320, kernel_size=3, num_repeat=1, expand_ratio=6, strides=(1, 1), se_ratio=0.25, id_skip=True, fused_conv=False, conv_type='depthwise')), stem_base_filters=32, top_base_filters=1280, activation='simple_swish', batch_norm='default', bn_momentum=0.99, bn_epsilon=0.001, weight_decay=5e-06, drop_connect_rate=0.2, depth_divisor=8, min_depth=None, use_se=True, input_channels=3, num_classes=1000, model_name='efficientnet', rescale_input=False, data_format='channels_last', dtype='float32')
I0723 20:34:42.258904 134204322144256 ssd_efficientnet_bifpn_feature_extractor.py:150] EfficientDet EfficientNet backbone version: efficientnet-b4
I0723 20:34:42.259042 134204322144256 ssd_efficientnet_bifpn_feature_extractor.py:152] EfficientDet BiFPN num filters: 224
I0723 20:34:42.259114 134204322144256 ssd_efficientnet_bifpn_feature_extractor.py:153] EfficientDet BiFPN num iterations: 7
I0723 20:34:42.261063 134204322144256 efficientnet_model.py:143] round_filter input=32 output=48
I0723 20:34:42.283690 134204322144256 efficientnet_model.py:143] round_filter input=32 output=48
I0723 20:34:42.283810 134204322144256 efficientnet_model.py:143] round_filter input=16 output=24
I0723 20:34:42.441046 134204322144256 efficientnet_model.py:143] round_filter input=16 output=24
I0723 20:34:42.441196 134204322144256 efficientnet_model.py:143] round_filter input=24 output=32
I0723 20:34:42.802810 134204322144256 efficientnet_model.py:143] round_filter input=24 output=32
I0723 20:34:42.802987 134204322144256 efficientnet_model.py:143] round_filter input=40 output=56
I0723 20:34:43.436457 134204322144256 efficientnet_model.py:143] round_filter input=40 output=56
I0723 20:34:43.436628 134204322144256 efficientnet_model.py:143] round_filter input=80 output=112
I0723 20:34:43.998985 134204322144256 efficientnet_model.py:143] round_filter input=80 output=112
I0723 20:34:43.999147 134204322144256 efficientnet_model.py:143] round_filter input=112 output=160
I0723 20:34:44.592246 134204322144256 efficientnet_model.py:143] round_filter input=112 output=160
I0723 20:34:44.592421 134204322144256 efficientnet_model.py:143] round_filter input=192 output=272
I0723 20:34:45.357318 134204322144256 efficientnet_model.py:143] round_filter input=192 output=272
I0723 20:34:45.357506 134204322144256 efficientnet_model.py:143] round_filter input=320 output=448
I0723 20:34:45.557609 134204322144256 efficientnet_model.py:143] round_filter input=1280 output=1792
I0723 20:34:45.595705 134204322144256 efficientnet_model.py:453] Building model efficientnet with params ModelConfig(width_coefficient=1.4, depth_coefficient=1.8, resolution=380, dropout_rate=0.4, blocks=(BlockConfig(input_filters=32, output_filters=16, kernel_size=3, num_repeat=1, expand_ratio=1, strides=(1, 1), se_ratio=0.25, id_skip=True, fused_conv=False, conv_type='depthwise'), BlockConfig(input_filters=16, output_filters=24, kernel_size=3, num_repeat=2, expand_ratio=6, strides=(2, 2), se_ratio=0.25, id_skip=True, fused_conv=False, conv_type='depthwise'), BlockConfig(input_filters=24, output_filters=40, kernel_size=5, num_repeat=2, expand_ratio=6, strides=(2, 2), se_ratio=0.25, id_skip=True, fused_conv=False, conv_type='depthwise'), BlockConfig(input_filters=40, output_filters=80, kernel_size=3, num_repeat=3, expand_ratio=6, strides=(2, 2), se_ratio=0.25, id_skip=True, fused_conv=False, conv_type='depthwise'), BlockConfig(input_filters=80, output_filters=112, kernel_size=5, num_repeat=3, expand_ratio=6, strides=(1, 1), se_ratio=0.25, id_skip=True, fused_conv=False, conv_type='depthwise'), BlockConfig(input_filters=112, output_filters=192, kernel_size=5, num_repeat=4, expand_ratio=6, strides=(2, 2), se_ratio=0.25, id_skip=True, fused_conv=False, conv_type='depthwise'), BlockConfig(input_filters=192, output_filters=320, kernel_size=3, num_repeat=1, expand_ratio=6, strides=(1, 1), se_ratio=0.25, id_skip=True, fused_conv=False, conv_type='depthwise')), stem_base_filters=32, top_base_filters=1280, activation='simple_swish', batch_norm='default', bn_momentum=0.99, bn_epsilon=0.001, weight_decay=5e-06, drop_connect_rate=0.2, depth_divisor=8, min_depth=None, use_se=True, input_channels=3, num_classes=1000, model_name='efficientnet', rescale_input=False, data_format='channels_last', dtype='float32')
I0723 20:34:45.679996 134204322144256 ssd_efficientnet_bifpn_feature_extractor.py:150] EfficientDet EfficientNet backbone version: efficientnet-b5
I0723 20:34:45.680130 134204322144256 ssd_efficientnet_bifpn_feature_extractor.py:152] EfficientDet BiFPN num filters: 288
I0723 20:34:45.680191 134204322144256 ssd_efficientnet_bifpn_feature_extractor.py:153] EfficientDet BiFPN num iterations: 7
I0723 20:34:45.682197 134204322144256 efficientnet_model.py:143] round_filter input=32 output=48
I0723 20:34:45.701752 134204322144256 efficientnet_model.py:143] round_filter input=32 output=48
I0723 20:34:45.701866 134204322144256 efficientnet_model.py:143] round_filter input=16 output=24
I0723 20:34:45.920870 134204322144256 efficientnet_model.py:143] round_filter input=16 output=24
I0723 20:34:45.921021 134204322144256 efficientnet_model.py:143] round_filter input=24 output=40
I0723 20:34:46.377120 134204322144256 efficientnet_model.py:143] round_filter input=24 output=40
I0723 20:34:46.377302 134204322144256 efficientnet_model.py:143] round_filter input=40 output=64
I0723 20:34:46.989664 134204322144256 efficientnet_model.py:143] round_filter input=40 output=64
I0723 20:34:46.989847 134204322144256 efficientnet_model.py:143] round_filter input=80 output=128
I0723 20:34:47.919650 134204322144256 efficientnet_model.py:143] round_filter input=80 output=128
I0723 20:34:47.919846 134204322144256 efficientnet_model.py:143] round_filter input=112 output=176
I0723 20:34:48.861994 134204322144256 efficientnet_model.py:143] round_filter input=112 output=176
I0723 20:34:48.862195 134204322144256 efficientnet_model.py:143] round_filter input=192 output=304
I0723 20:34:50.135412 134204322144256 efficientnet_model.py:143] round_filter input=192 output=304
I0723 20:34:50.135600 134204322144256 efficientnet_model.py:143] round_filter input=320 output=512
I0723 20:34:50.589513 134204322144256 efficientnet_model.py:143] round_filter input=1280 output=2048
I0723 20:34:50.650698 134204322144256 efficientnet_model.py:453] Building model efficientnet with params ModelConfig(width_coefficient=1.6, depth_coefficient=2.2, resolution=456, dropout_rate=0.4, blocks=(BlockConfig(input_filters=32, output_filters=16, kernel_size=3, num_repeat=1, expand_ratio=1, strides=(1, 1), se_ratio=0.25, id_skip=True, fused_conv=False, conv_type='depthwise'), BlockConfig(input_filters=16, output_filters=24, kernel_size=3, num_repeat=2, expand_ratio=6, strides=(2, 2), se_ratio=0.25, id_skip=True, fused_conv=False, conv_type='depthwise'), BlockConfig(input_filters=24, output_filters=40, kernel_size=5, num_repeat=2, expand_ratio=6, strides=(2, 2), se_ratio=0.25, id_skip=True, fused_conv=False, conv_type='depthwise'), BlockConfig(input_filters=40, output_filters=80, kernel_size=3, num_repeat=3, expand_ratio=6, strides=(2, 2), se_ratio=0.25, id_skip=True, fused_conv=False, conv_type='depthwise'), BlockConfig(input_filters=80, output_filters=112, kernel_size=5, num_repeat=3, expand_ratio=6, strides=(1, 1), se_ratio=0.25, id_skip=True, fused_conv=False, conv_type='depthwise'), BlockConfig(input_filters=112, output_filters=192, kernel_size=5, num_repeat=4, expand_ratio=6, strides=(2, 2), se_ratio=0.25, id_skip=True, fused_conv=False, conv_type='depthwise'), BlockConfig(input_filters=192, output_filters=320, kernel_size=3, num_repeat=1, expand_ratio=6, strides=(1, 1), se_ratio=0.25, id_skip=True, fused_conv=False, conv_type='depthwise')), stem_base_filters=32, top_base_filters=1280, activation='simple_swish', batch_norm='default', bn_momentum=0.99, bn_epsilon=0.001, weight_decay=5e-06, drop_connect_rate=0.2, depth_divisor=8, min_depth=None, use_se=True, input_channels=3, num_classes=1000, model_name='efficientnet', rescale_input=False, data_format='channels_last', dtype='float32')
I0723 20:34:50.810115 134204322144256 ssd_efficientnet_bifpn_feature_extractor.py:150] EfficientDet EfficientNet backbone version: efficientnet-b6
I0723 20:34:50.810914 134204322144256 ssd_efficientnet_bifpn_feature_extractor.py:152] EfficientDet BiFPN num filters: 384
I0723 20:34:50.811031 134204322144256 ssd_efficientnet_bifpn_feature_extractor.py:153] EfficientDet BiFPN num iterations: 8
I0723 20:34:50.814146 134204322144256 efficientnet_model.py:143] round_filter input=32 output=56
I0723 20:34:50.844530 134204322144256 efficientnet_model.py:143] round_filter input=32 output=56
I0723 20:34:50.844656 134204322144256 efficientnet_model.py:143] round_filter input=16 output=32
I0723 20:34:51.185732 134204322144256 efficientnet_model.py:143] round_filter input=16 output=32
I0723 20:34:51.185930 134204322144256 efficientnet_model.py:143] round_filter input=24 output=40
I0723 20:34:52.048858 134204322144256 efficientnet_model.py:143] round_filter input=24 output=40
I0723 20:34:52.049053 134204322144256 efficientnet_model.py:143] round_filter input=40 output=72
I0723 20:34:52.905248 134204322144256 efficientnet_model.py:143] round_filter input=40 output=72
I0723 20:34:52.905460 134204322144256 efficientnet_model.py:143] round_filter input=80 output=144
I0723 20:34:54.442086 134204322144256 efficientnet_model.py:143] round_filter input=80 output=144
I0723 20:34:54.442304 134204322144256 efficientnet_model.py:143] round_filter input=112 output=200
I0723 20:34:55.592005 134204322144256 efficientnet_model.py:143] round_filter input=112 output=200
I0723 20:34:55.592208 134204322144256 efficientnet_model.py:143] round_filter input=192 output=344
I0723 20:34:57.173216 134204322144256 efficientnet_model.py:143] round_filter input=192 output=344
I0723 20:34:57.173430 134204322144256 efficientnet_model.py:143] round_filter input=320 output=576
I0723 20:34:57.606037 134204322144256 efficientnet_model.py:143] round_filter input=1280 output=2304
I0723 20:34:57.665544 134204322144256 efficientnet_model.py:453] Building model efficientnet with params ModelConfig(width_coefficient=1.8, depth_coefficient=2.6, resolution=528, dropout_rate=0.5, blocks=(BlockConfig(input_filters=32, output_filters=16, kernel_size=3, num_repeat=1, expand_ratio=1, strides=(1, 1), se_ratio=0.25, id_skip=True, fused_conv=False, conv_type='depthwise'), BlockConfig(input_filters=16, output_filters=24, kernel_size=3, num_repeat=2, expand_ratio=6, strides=(2, 2), se_ratio=0.25, id_skip=True, fused_conv=False, conv_type='depthwise'), BlockConfig(input_filters=24, output_filters=40, kernel_size=5, num_repeat=2, expand_ratio=6, strides=(2, 2), se_ratio=0.25, id_skip=True, fused_conv=False, conv_type='depthwise'), BlockConfig(input_filters=40, output_filters=80, kernel_size=3, num_repeat=3, expand_ratio=6, strides=(2, 2), se_ratio=0.25, id_skip=True, fused_conv=False, conv_type='depthwise'), BlockConfig(input_filters=80, output_filters=112, kernel_size=5, num_repeat=3, expand_ratio=6, strides=(1, 1), se_ratio=0.25, id_skip=True, fused_conv=False, conv_type='depthwise'), BlockConfig(input_filters=112, output_filters=192, kernel_size=5, num_repeat=4, expand_ratio=6, strides=(2, 2), se_ratio=0.25, id_skip=True, fused_conv=False, conv_type='depthwise'), BlockConfig(input_filters=192, output_filters=320, kernel_size=3, num_repeat=1, expand_ratio=6, strides=(1, 1), se_ratio=0.25, id_skip=True, fused_conv=False, conv_type='depthwise')), stem_base_filters=32, top_base_filters=1280, activation='simple_swish', batch_norm='default', bn_momentum=0.99, bn_epsilon=0.001, weight_decay=5e-06, drop_connect_rate=0.2, depth_divisor=8, min_depth=None, use_se=True, input_channels=3, num_classes=1000, model_name='efficientnet', rescale_input=False, data_format='channels_last', dtype='float32')
I0723 20:34:57.845144 134204322144256 ssd_efficientnet_bifpn_feature_extractor.py:150] EfficientDet EfficientNet backbone version: efficientnet-b7
I0723 20:34:57.845336 134204322144256 ssd_efficientnet_bifpn_feature_extractor.py:152] EfficientDet BiFPN num filters: 384
I0723 20:34:57.845443 134204322144256 ssd_efficientnet_bifpn_feature_extractor.py:153] EfficientDet BiFPN num iterations: 8
I0723 20:34:57.848743 134204322144256 efficientnet_model.py:143] round_filter input=32 output=64
I0723 20:34:57.885626 134204322144256 efficientnet_model.py:143] round_filter input=32 output=64
I0723 20:34:57.885745 134204322144256 efficientnet_model.py:143] round_filter input=16 output=32
I0723 20:34:58.342285 134204322144256 efficientnet_model.py:143] round_filter input=16 output=32
I0723 20:34:58.342501 134204322144256 efficientnet_model.py:143] round_filter input=24 output=48
I0723 20:34:59.027451 134204322144256 efficientnet_model.py:143] round_filter input=24 output=48
I0723 20:34:59.027620 134204322144256 efficientnet_model.py:143] round_filter input=40 output=80
I0723 20:34:59.679092 134204322144256 efficientnet_model.py:143] round_filter input=40 output=80
I0723 20:34:59.679250 134204322144256 efficientnet_model.py:143] round_filter input=80 output=160
I0723 20:35:00.596528 134204322144256 efficientnet_model.py:143] round_filter input=80 output=160
I0723 20:35:00.596683 134204322144256 efficientnet_model.py:143] round_filter input=112 output=224
I0723 20:35:01.537988 134204322144256 efficientnet_model.py:143] round_filter input=112 output=224
I0723 20:35:01.538163 134204322144256 efficientnet_model.py:143] round_filter input=192 output=384
I0723 20:35:02.753057 134204322144256 efficientnet_model.py:143] round_filter input=192 output=384
I0723 20:35:02.753216 134204322144256 efficientnet_model.py:143] round_filter input=320 output=640
I0723 20:35:03.139790 134204322144256 efficientnet_model.py:143] round_filter input=1280 output=2560
I0723 20:35:03.179669 134204322144256 efficientnet_model.py:453] Building model efficientnet with params ModelConfig(width_coefficient=2.0, depth_coefficient=3.1, resolution=600, dropout_rate=0.5, blocks=(BlockConfig(input_filters=32, output_filters=16, kernel_size=3, num_repeat=1, expand_ratio=1, strides=(1, 1), se_ratio=0.25, id_skip=True, fused_conv=False, conv_type='depthwise'), BlockConfig(input_filters=16, output_filters=24, kernel_size=3, num_repeat=2, expand_ratio=6, strides=(2, 2), se_ratio=0.25, id_skip=True, fused_conv=False, conv_type='depthwise'), BlockConfig(input_filters=24, output_filters=40, kernel_size=5, num_repeat=2, expand_ratio=6, strides=(2, 2), se_ratio=0.25, id_skip=True, fused_conv=False, conv_type='depthwise'), BlockConfig(input_filters=40, output_filters=80, kernel_size=3, num_repeat=3, expand_ratio=6, strides=(2, 2), se_ratio=0.25, id_skip=True, fused_conv=False, conv_type='depthwise'), BlockConfig(input_filters=80, output_filters=112, kernel_size=5, num_repeat=3, expand_ratio=6, strides=(1, 1), se_ratio=0.25, id_skip=True, fused_conv=False, conv_type='depthwise'), BlockConfig(input_filters=112, output_filters=192, kernel_size=5, num_repeat=4, expand_ratio=6, strides=(2, 2), se_ratio=0.25, id_skip=True, fused_conv=False, conv_type='depthwise'), BlockConfig(input_filters=192, output_filters=320, kernel_size=3, num_repeat=1, expand_ratio=6, strides=(1, 1), se_ratio=0.25, id_skip=True, fused_conv=False, conv_type='depthwise')), stem_base_filters=32, top_base_filters=1280, activation='simple_swish', batch_norm='default', bn_momentum=0.99, bn_epsilon=0.001, weight_decay=5e-06, drop_connect_rate=0.2, depth_divisor=8, min_depth=None, use_se=True, input_channels=3, num_classes=1000, model_name='efficientnet', rescale_input=False, data_format='channels_last', dtype='float32')
INFO:tensorflow:time(__main__.ModelBuilderTF2Test.test_create_ssd_models_from_config): 31.97s
I0723 20:35:03.301067 134204322144256 test_util.py:2462] time(__main__.ModelBuilderTF2Test.test_create_ssd_models_from_config): 31.97s
[ OK ] ModelBuilderTF2Test.test_create_ssd_models_from_config
[ RUN ] ModelBuilderTF2Test.test_invalid_faster_rcnn_batchnorm_update
INFO:tensorflow:time(__main__.ModelBuilderTF2Test.test_invalid_faster_rcnn_batchnorm_update): 0.0s
I0723 20:35:03.329191 134204322144256 test_util.py:2462] time(__main__.ModelBuilderTF2Test.test_invalid_faster_rcnn_batchnorm_update): 0.0s
[ OK ] ModelBuilderTF2Test.test_invalid_faster_rcnn_batchnorm_update
[ RUN ] ModelBuilderTF2Test.test_invalid_first_stage_nms_iou_threshold
INFO:tensorflow:time(__main__.ModelBuilderTF2Test.test_invalid_first_stage_nms_iou_threshold): 0.0s
I0723 20:35:03.330960 134204322144256 test_util.py:2462] time(__main__.ModelBuilderTF2Test.test_invalid_first_stage_nms_iou_threshold): 0.0s
[ OK ] ModelBuilderTF2Test.test_invalid_first_stage_nms_iou_threshold
[ RUN ] ModelBuilderTF2Test.test_invalid_model_config_proto
INFO:tensorflow:time(__main__.ModelBuilderTF2Test.test_invalid_model_config_proto): 0.0s
I0723 20:35:03.331539 134204322144256 test_util.py:2462] time(__main__.ModelBuilderTF2Test.test_invalid_model_config_proto): 0.0s
[ OK ] ModelBuilderTF2Test.test_invalid_model_config_proto
[ RUN ] ModelBuilderTF2Test.test_invalid_second_stage_batch_size
INFO:tensorflow:time(__main__.ModelBuilderTF2Test.test_invalid_second_stage_batch_size): 0.0s
I0723 20:35:03.333095 134204322144256 test_util.py:2462] time(__main__.ModelBuilderTF2Test.test_invalid_second_stage_batch_size): 0.0s
[ OK ] ModelBuilderTF2Test.test_invalid_second_stage_batch_size
[ RUN ] ModelBuilderTF2Test.test_session
[ SKIPPED ] ModelBuilderTF2Test.test_session
[ RUN ] ModelBuilderTF2Test.test_unknown_faster_rcnn_feature_extractor
INFO:tensorflow:time(__main__.ModelBuilderTF2Test.test_unknown_faster_rcnn_feature_extractor): 0.0s
I0723 20:35:03.334475 134204322144256 test_util.py:2462] time(__main__.ModelBuilderTF2Test.test_unknown_faster_rcnn_feature_extractor): 0.0s
[ OK ] ModelBuilderTF2Test.test_unknown_faster_rcnn_feature_extractor
[ RUN ] ModelBuilderTF2Test.test_unknown_meta_architecture
INFO:tensorflow:time(__main__.ModelBuilderTF2Test.test_unknown_meta_architecture): 0.0s
I0723 20:35:03.334891 134204322144256 test_util.py:2462] time(__main__.ModelBuilderTF2Test.test_unknown_meta_architecture): 0.0s
[ OK ] ModelBuilderTF2Test.test_unknown_meta_architecture
[ RUN ] ModelBuilderTF2Test.test_unknown_ssd_feature_extractor
INFO:tensorflow:time(__main__.ModelBuilderTF2Test.test_unknown_ssd_feature_extractor): 0.0s
I0723 20:35:03.335854 134204322144256 test_util.py:2462] time(__main__.ModelBuilderTF2Test.test_unknown_ssd_feature_extractor): 0.0s
[ OK ] ModelBuilderTF2Test.test_unknown_ssd_feature_extractor
----------------------------------------------------------------------
Ran 24 tests in 48.975s
OK (skipped=1)
# Go back to Tensorflow/
%cd ..
%cd ..
/content/Tensorflow/models /content/Tensorflow
# Clone and install CocoAPI
!git clone https://github.com/cocodataset/cocoapi.git
%cd cocoapi/PythonAPI
!make
%cp -r pycocotools /content/Tensorflow/models/research/
Cloning into 'cocoapi'... remote: Enumerating objects: 975, done. remote: Total 975 (delta 0), reused 0 (delta 0), pack-reused 975 Receiving objects: 100% (975/975), 11.72 MiB | 23.59 MiB/s, done. Resolving deltas: 100% (576/576), done. /content/Tensorflow/cocoapi/PythonAPI python setup.py build_ext --inplace running build_ext cythoning pycocotools/_mask.pyx to pycocotools/_mask.c /usr/local/lib/python3.10/dist-packages/Cython/Compiler/Main.py:369: FutureWarning: Cython directive 'language_level' not set, using 2 for now (Py2). This will change in a later release! File: /content/Tensorflow/cocoapi/PythonAPI/pycocotools/_mask.pyx tree = Parsing.p_module(s, pxd, full_module_name) building 'pycocotools._mask' extension creating build creating build/common creating build/temp.linux-x86_64-cpython-310 creating build/temp.linux-x86_64-cpython-310/pycocotools x86_64-linux-gnu-gcc -Wno-unused-result -Wsign-compare -DNDEBUG -g -fwrapv -O2 -Wall -g -fstack-protector-strong -Wformat -Werror=format-security -g -fwrapv -O2 -fPIC -I/usr/local/lib/python3.10/dist-packages/numpy/core/include -I../common -I/usr/include/python3.10 -c ../common/maskApi.c -o build/temp.linux-x86_64-cpython-310/../common/maskApi.o -Wno-cpp -Wno-unused-function -std=c99 ../common/maskApi.c: In function ‘rleDecode’: ../common/maskApi.c:46:7: warning: this ‘for’ clause does not guard... []8;;https://gcc.gnu.org/onlinedocs/gcc/Warning-Options.html#index-Wmisleading-indentation-Wmisleading-indentation]8;;] 46 | for( k=0; k<R[i].cnts[j]; k++ ) *(M++)=v; v=!v; }} | ^~~ ../common/maskApi.c:46:49: note: ...this statement, but the latter is misleadingly indented as if it were guarded by the ‘for’ 46 | for( k=0; k<R[i].cnts[j]; k++ ) *(M++)=v; v=!v; }} | ^ ../common/maskApi.c: In function ‘rleFrPoly’: ../common/maskApi.c:166:3: warning: this ‘for’ clause does not guard... 
[]8;;https://gcc.gnu.org/onlinedocs/gcc/Warning-Options.html#index-Wmisleading-indentation-Wmisleading-indentation]8;;] 166 | for(j=0; j<k; j++) x[j]=(int)(scale*xy[j*2+0]+.5); x[k]=x[0]; | ^~~ ../common/maskApi.c:166:54: note: ...this statement, but the latter is misleadingly indented as if it were guarded by the ‘for’ 166 | for(j=0; j<k; j++) x[j]=(int)(scale*xy[j*2+0]+.5); x[k]=x[0]; | ^ ../common/maskApi.c:167:3: warning: this ‘for’ clause does not guard... []8;;https://gcc.gnu.org/onlinedocs/gcc/Warning-Options.html#index-Wmisleading-indentation-Wmisleading-indentation]8;;] 167 | for(j=0; j<k; j++) y[j]=(int)(scale*xy[j*2+1]+.5); y[k]=y[0]; | ^~~ ../common/maskApi.c:167:54: note: ...this statement, but the latter is misleadingly indented as if it were guarded by the ‘for’ 167 | for(j=0; j<k; j++) y[j]=(int)(scale*xy[j*2+1]+.5); y[k]=y[0]; | ^ ../common/maskApi.c: In function ‘rleToString’: ../common/maskApi.c:212:7: warning: this ‘if’ clause does not guard... []8;;https://gcc.gnu.org/onlinedocs/gcc/Warning-Options.html#index-Wmisleading-indentation-Wmisleading-indentation]8;;] 212 | if(more) c |= 0x20; c+=48; s[p++]=c; | ^~ ../common/maskApi.c:212:27: note: ...this statement, but the latter is misleadingly indented as if it were guarded by the ‘if’ 212 | if(more) c |= 0x20; c+=48; s[p++]=c; | ^ ../common/maskApi.c: In function ‘rleFrString’: ../common/maskApi.c:220:3: warning: this ‘while’ clause does not guard... []8;;https://gcc.gnu.org/onlinedocs/gcc/Warning-Options.html#index-Wmisleading-indentation-Wmisleading-indentation]8;;] 220 | while( s[m] ) m++; cnts=malloc(sizeof(uint)*m); m=0; | ^~~~~ ../common/maskApi.c:220:22: note: ...this statement, but the latter is misleadingly indented as if it were guarded by the ‘while’ 220 | while( s[m] ) m++; cnts=malloc(sizeof(uint)*m); m=0; | ^~~~ ../common/maskApi.c:228:5: warning: this ‘if’ clause does not guard... 
[]8;;https://gcc.gnu.org/onlinedocs/gcc/Warning-Options.html#index-Wmisleading-indentation-Wmisleading-indentation]8;;] 228 | if(m>2) x+=(long) cnts[m-2]; cnts[m++]=(uint) x; | ^~ ../common/maskApi.c:228:34: note: ...this statement, but the latter is misleadingly indented as if it were guarded by the ‘if’ 228 | if(m>2) x+=(long) cnts[m-2]; cnts[m++]=(uint) x; | ^~~~ x86_64-linux-gnu-gcc -Wno-unused-result -Wsign-compare -DNDEBUG -g -fwrapv -O2 -Wall -g -fstack-protector-strong -Wformat -Werror=format-security -g -fwrapv -O2 -fPIC -I/usr/local/lib/python3.10/dist-packages/numpy/core/include -I../common -I/usr/include/python3.10 -c pycocotools/_mask.c -o build/temp.linux-x86_64-cpython-310/pycocotools/_mask.o -Wno-cpp -Wno-unused-function -std=c99 creating build/lib.linux-x86_64-cpython-310 creating build/lib.linux-x86_64-cpython-310/pycocotools x86_64-linux-gnu-gcc -shared -Wl,-O1 -Wl,-Bsymbolic-functions -Wl,-Bsymbolic-functions -g -fwrapv -O2 build/temp.linux-x86_64-cpython-310/../common/maskApi.o build/temp.linux-x86_64-cpython-310/pycocotools/_mask.o -L/usr/lib/x86_64-linux-gnu -o build/lib.linux-x86_64-cpython-310/pycocotools/_mask.cpython-310-x86_64-linux-gnu.so copying build/lib.linux-x86_64-cpython-310/pycocotools/_mask.cpython-310-x86_64-linux-gnu.so -> pycocotools rm -rf build
The dataset contains 4 categories:
| Category |
|---|
| Traffic Light |
| Stop |
| Speed Limit |
| Crosswalk |
# Go back to Tensorflow/
%cd ..
%cd ..
/content/Tensorflow/cocoapi /content/Tensorflow
# Create new directory Tensorflow/data/ to hold the dataset.
if not os.path.exists("data"):
    os.mkdir("data")
# Download the dataset archive into Tensorflow/data/ (gdown fetches the
# file behind the Google Drive share id).
%cd data
!gdown "https://drive.google.com/uc?export=download&id=1DgDd-PHRYb-y0FGDDXHKbi6ZaGoaTimB"
# Unpack the archive in the current directory, then delete the zip to save disk.
with ZipFile("archive.zip") as zipfile:
    zipfile.extractall()
!rm "archive.zip"
/content/Tensorflow/data Downloading... From: https://drive.google.com/uc?export=download&id=1DgDd-PHRYb-y0FGDDXHKbi6ZaGoaTimB To: /content/Tensorflow/data/archive.zip 100% 229M/229M [00:04<00:00, 51.6MB/s]
# Still in Tensorflow/data/: create the train/ and valid/ split directories.
# os.makedirs(..., exist_ok=True) replaces the race-prone exists()+mkdir()
# check-then-create pattern and makes this cell safe to re-run.
os.makedirs("train", exist_ok=True)
os.makedirs("valid", exist_ok=True)
import numpy as np
# Fix the RNG seed so the train/valid split is reproducible across runs.
np.random.seed(42)
all_pngs = os.listdir("./images")
# train:valid ratio is 8:2
train_size = int(len(all_pngs) * .8)
# Prepare the lists of train and validation image filenames; each image has a
# matching annotation file whose name is derived by swapping the 4-character
# ".png" suffix for ".xml" (assumes every listed file ends in a 4-char
# extension — TODO confirm against the dataset contents).
train_pngs = np.random.choice(all_pngs, train_size, replace=False)
train_xmls = [png[:-4]+'.xml' for png in train_pngs]
# Validation set = every image not chosen for training (setdiff1d also sorts).
valid_pngs = np.setdiff1d(all_pngs, train_pngs)
valid_xmls = [png[:-4]+'.xml' for png in valid_pngs]
# Display counts for a sanity check: total, train, valid.
len(all_pngs), len(train_pngs), len(valid_pngs)
(877, 701, 176)
import shutil

# Distribute the split file lists into their destination directories:
# images come from Tensorflow/data/images/, annotations from
# Tensorflow/data/annotations/, and both land in train/ or valid/.
for _dest, _pngs, _xmls in (
    ("train/", train_pngs, train_xmls),
    ("valid/", valid_pngs, valid_xmls),
):
    for _png in _pngs:
        shutil.copy("images/" + _png, _dest)
    for _xml in _xmls:
        shutil.copy("annotations/" + _xml, _dest)
# Go back to Tensorflow/
%cd ..
/content/Tensorflow
# Label map: one entry per object category. Ids start at 1 because the
# TF Object Detection API reserves id 0 for the background class.
labels = [{'name':'trafficlight', 'id':1}, {'name':'stop', 'id':2}, {'name':'speedlimit', 'id':3}, {'name':'crosswalk', 'id':4}]
# Create the directory for the label map file: Tensorflow/training/annotations/.
# makedirs(exist_ok=True) builds the whole chain in one idempotent call,
# replacing the original's two race-prone exists()+mkdir() steps.
os.makedirs("training/annotations", exist_ok=True)
# Write the label map in the pbtxt format expected by the TF OD API.
with open("training/annotations/label_map.pbtxt", 'w') as f:
    for label in labels:
        f.write('item { \n')
        f.write('\tname:\'{}\'\n'.format(label['name']))
        f.write('\tid:{}\n'.format(label['id']))
        f.write('}\n')
# Create new directory to store TFRecord generation script: Tensorflow/scripts/
if not os.path.exists("scripts"):
os.mkdir("scripts")
if not os.path.exists("scripts/preprocessing"):
os.mkdir("scripts/preprocessing")
!wget https://tensorflow-object-detection-api-tutorial.readthedocs.io/en/latest/_downloads/da4babe668a8afb093cc7776d7e630f3/generate_tfrecord.py -P scripts/preprocessing
--2023-07-23 20:35:31-- https://tensorflow-object-detection-api-tutorial.readthedocs.io/en/latest/_downloads/da4babe668a8afb093cc7776d7e630f3/generate_tfrecord.py Resolving tensorflow-object-detection-api-tutorial.readthedocs.io (tensorflow-object-detection-api-tutorial.readthedocs.io)... 104.17.33.82, 104.17.32.82, 2606:4700::6811:2052, ... Connecting to tensorflow-object-detection-api-tutorial.readthedocs.io (tensorflow-object-detection-api-tutorial.readthedocs.io)|104.17.33.82|:443... connected. HTTP request sent, awaiting response... 200 OK Length: 6410 (6.3K) [text/x-python] Saving to: ‘scripts/preprocessing/generate_tfrecord.py’ generate_tfrecord.p 100%[===================>] 6.26K --.-KB/s in 0s 2023-07-23 20:35:31 (83.6 MB/s) - ‘scripts/preprocessing/generate_tfrecord.py’ saved [6410/6410]
# Generate TFRecord files by running the script in Tensorflow/scripts/ and store the generated file in Tensorflow/training/annotations/
%cd scripts/preprocessing
!python generate_tfrecord.py -x /content/Tensorflow/data/train -l /content/Tensorflow/training/annotations/label_map.pbtxt -o /content/Tensorflow/training/annotations/train.record
!python generate_tfrecord.py -x /content/Tensorflow/data/valid -l /content/Tensorflow/training/annotations/label_map.pbtxt -o /content/Tensorflow/training//annotations/valid.record
/content/Tensorflow/scripts/preprocessing Successfully created the TFRecord file: /content/Tensorflow/training/annotations/train.record Successfully created the TFRecord file: /content/Tensorflow/training//annotations/valid.record
# go back to Tensorflow/
%cd ..
%cd ..
/content/Tensorflow/scripts /content/Tensorflow
# Create new directory to store the pretrained model: Tensorflow/pre-trained-models/
if not os.path.exists("pre-trained-models"):
    os.mkdir("pre-trained-models")
# Download the pretrained checkpoint archive from the TF2 detection model zoo.
command = f"wget http://download.tensorflow.org/models/object_detection/tf2/20200711/{MODEL_NAME}.tar.gz -P pre-trained-models"
!{command}
# Unpack the archive into pre-trained-models/, then delete the tarball.
# NOTE(review): extractall() without a `filter=` trusts the archive's member
# paths — acceptable here for a known Google-hosted tarball, but worth a look.
with tarfile.open(f"pre-trained-models/{MODEL_NAME}.tar.gz") as tar:
    tar.extractall("pre-trained-models")
command = f"rm pre-trained-models/{MODEL_NAME}.tar.gz"
!{command}
--2023-07-23 20:35:43-- http://download.tensorflow.org/models/object_detection/tf2/20200711/ssd_mobilenet_v2_fpnlite_640x640_coco17_tpu-8.tar.gz Resolving download.tensorflow.org (download.tensorflow.org)... 173.194.217.128, 2607:f8b0:400c:c13::80 Connecting to download.tensorflow.org (download.tensorflow.org)|173.194.217.128|:80... connected. HTTP request sent, awaiting response... 200 OK Length: 20518283 (20M) [application/x-tar] Saving to: ‘pre-trained-models/ssd_mobilenet_v2_fpnlite_640x640_coco17_tpu-8.tar.gz’ ssd_mobilenet_v2_fp 100%[===================>] 19.57M 130MB/s in 0.2s 2023-07-23 20:35:44 (130 MB/s) - ‘pre-trained-models/ssd_mobilenet_v2_fpnlite_640x640_coco17_tpu-8.tar.gz’ saved [20518283/20518283]
# Create a directory for the editable training config:
# TensorFlow/training/models/<MODEL_NAME>/.
# os.makedirs with exist_ok=True creates the whole directory chain in one
# idempotent call; the original's bare os.mkdir on the model subdirectory
# raised FileExistsError whenever this cell was re-run.
os.makedirs(f"training/models/{MODEL_NAME}", exist_ok=True)
# Copy pipeline.config from TensorFlow/pre-trained-models/ so we can edit it
# without touching the pristine copy that ships with the checkpoint.
PIPELINE_CONFIG = f"/content/Tensorflow/training/models/{MODEL_NAME}/pipeline.config"
shutil.copy(f"/content/Tensorflow/pre-trained-models/{MODEL_NAME}/pipeline.config", PIPELINE_CONFIG)
'/content/Tensorflow/training/models/ssd_mobilenet_v2_fpnlite_640x640_coco17_tpu-8/pipeline.config'
# Load the training pipeline.config protobuf so its fields can be edited
# from Python before launching training.
import tensorflow as tf
from object_detection.protos import pipeline_pb2
from google.protobuf import text_format

pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
# Parse the text-format proto straight from the config file into the message.
with tf.io.gfile.GFile(PIPELINE_CONFIG, "r") as config_file:
    text_format.Merge(config_file.read(), pipeline_config)
# Display the loaded config (notebook cell output).
pipeline_config
model {
ssd {
num_classes: 90
image_resizer {
fixed_shape_resizer {
height: 640
width: 640
}
}
feature_extractor {
type: "ssd_mobilenet_v2_fpn_keras"
depth_multiplier: 1.0
min_depth: 16
conv_hyperparams {
regularizer {
l2_regularizer {
weight: 3.9999998989515007e-05
}
}
initializer {
random_normal_initializer {
mean: 0.0
stddev: 0.009999999776482582
}
}
activation: RELU_6
batch_norm {
decay: 0.996999979019165
scale: true
epsilon: 0.0010000000474974513
}
}
use_depthwise: true
override_base_feature_extractor_hyperparams: true
fpn {
min_level: 3
max_level: 7
additional_layer_depth: 128
}
}
box_coder {
faster_rcnn_box_coder {
y_scale: 10.0
x_scale: 10.0
height_scale: 5.0
width_scale: 5.0
}
}
matcher {
argmax_matcher {
matched_threshold: 0.5
unmatched_threshold: 0.5
ignore_thresholds: false
negatives_lower_than_unmatched: true
force_match_for_each_row: true
use_matmul_gather: true
}
}
similarity_calculator {
iou_similarity {
}
}
box_predictor {
weight_shared_convolutional_box_predictor {
conv_hyperparams {
regularizer {
l2_regularizer {
weight: 3.9999998989515007e-05
}
}
initializer {
random_normal_initializer {
mean: 0.0
stddev: 0.009999999776482582
}
}
activation: RELU_6
batch_norm {
decay: 0.996999979019165
scale: true
epsilon: 0.0010000000474974513
}
}
depth: 128
num_layers_before_predictor: 4
kernel_size: 3
class_prediction_bias_init: -4.599999904632568
share_prediction_tower: true
use_depthwise: true
}
}
anchor_generator {
multiscale_anchor_generator {
min_level: 3
max_level: 7
anchor_scale: 4.0
aspect_ratios: 1.0
aspect_ratios: 2.0
aspect_ratios: 0.5
scales_per_octave: 2
}
}
post_processing {
batch_non_max_suppression {
score_threshold: 9.99999993922529e-09
iou_threshold: 0.6000000238418579
max_detections_per_class: 100
max_total_detections: 100
use_static_shapes: false
}
score_converter: SIGMOID
}
normalize_loss_by_num_matches: true
loss {
localization_loss {
weighted_smooth_l1 {
}
}
classification_loss {
weighted_sigmoid_focal {
gamma: 2.0
alpha: 0.25
}
}
classification_weight: 1.0
localization_weight: 1.0
}
encode_background_as_zeros: true
normalize_loc_loss_by_codesize: true
inplace_batchnorm_update: true
freeze_batchnorm: false
}
}
train_config {
batch_size: 128
data_augmentation_options {
random_horizontal_flip {
}
}
data_augmentation_options {
random_crop_image {
min_object_covered: 0.0
min_aspect_ratio: 0.75
max_aspect_ratio: 3.0
min_area: 0.75
max_area: 1.0
overlap_thresh: 0.0
}
}
sync_replicas: true
optimizer {
momentum_optimizer {
learning_rate {
cosine_decay_learning_rate {
learning_rate_base: 0.07999999821186066
total_steps: 50000
warmup_learning_rate: 0.026666000485420227
warmup_steps: 1000
}
}
momentum_optimizer_value: 0.8999999761581421
}
use_moving_average: false
}
fine_tune_checkpoint: "PATH_TO_BE_CONFIGURED"
num_steps: 50000
startup_delay_steps: 0.0
replicas_to_aggregate: 8
max_number_of_boxes: 100
unpad_groundtruth_tensors: false
fine_tune_checkpoint_type: "classification"
fine_tune_checkpoint_version: V2
}
train_input_reader {
label_map_path: "PATH_TO_BE_CONFIGURED"
tf_record_input_reader {
input_path: "PATH_TO_BE_CONFIGURED"
}
}
eval_config {
metrics_set: "coco_detection_metrics"
use_moving_averages: false
}
eval_input_reader {
label_map_path: "PATH_TO_BE_CONFIGURED"
shuffle: false
num_epochs: 1
tf_record_input_reader {
input_path: "PATH_TO_BE_CONFIGURED"
}
}
# Adapt the COCO config (90 classes) to our 4-category traffic-sign dataset.
pipeline_config.model.ssd.num_classes = len(labels)
# Lower the zoo config's batch size (128) to 4 for this runtime.
pipeline_config.train_config.batch_size = 4
# Fine-tune from the downloaded checkpoint; checkpoint type "detection"
# restores the full detection model weights rather than only a
# classification backbone.
pipeline_config.train_config.fine_tune_checkpoint = os.path.join(f"/content/Tensorflow/pre-trained-models/{MODEL_NAME}/checkpoint", "ckpt-0")
pipeline_config.train_config.fine_tune_checkpoint_type = "detection"
# Point the train and eval input readers at our label map and TFRecord files.
pipeline_config.train_input_reader.label_map_path= "/content/Tensorflow/training/annotations/label_map.pbtxt"
pipeline_config.train_input_reader.tf_record_input_reader.input_path[:] = ["/content/Tensorflow/training/annotations/train.record"]
pipeline_config.eval_input_reader[0].label_map_path = "/content/Tensorflow/training/annotations/label_map.pbtxt"
pipeline_config.eval_input_reader[0].tf_record_input_reader.input_path[:] = ["/content/Tensorflow/training/annotations/valid.record"]
# Remove all data augmentation options from train_config.
pipeline_config.train_config.ClearField("data_augmentation_options")
# Display the edited config for a sanity check (notebook cell output).
pipeline_config
model {
ssd {
num_classes: 4
image_resizer {
fixed_shape_resizer {
height: 640
width: 640
}
}
feature_extractor {
type: "ssd_mobilenet_v2_fpn_keras"
depth_multiplier: 1.0
min_depth: 16
conv_hyperparams {
regularizer {
l2_regularizer {
weight: 3.9999998989515007e-05
}
}
initializer {
random_normal_initializer {
mean: 0.0
stddev: 0.009999999776482582
}
}
activation: RELU_6
batch_norm {
decay: 0.996999979019165
scale: true
epsilon: 0.0010000000474974513
}
}
use_depthwise: true
override_base_feature_extractor_hyperparams: true
fpn {
min_level: 3
max_level: 7
additional_layer_depth: 128
}
}
box_coder {
faster_rcnn_box_coder {
y_scale: 10.0
x_scale: 10.0
height_scale: 5.0
width_scale: 5.0
}
}
matcher {
argmax_matcher {
matched_threshold: 0.5
unmatched_threshold: 0.5
ignore_thresholds: false
negatives_lower_than_unmatched: true
force_match_for_each_row: true
use_matmul_gather: true
}
}
similarity_calculator {
iou_similarity {
}
}
box_predictor {
weight_shared_convolutional_box_predictor {
conv_hyperparams {
regularizer {
l2_regularizer {
weight: 3.9999998989515007e-05
}
}
initializer {
random_normal_initializer {
mean: 0.0
stddev: 0.009999999776482582
}
}
activation: RELU_6
batch_norm {
decay: 0.996999979019165
scale: true
epsilon: 0.0010000000474974513
}
}
depth: 128
num_layers_before_predictor: 4
kernel_size: 3
class_prediction_bias_init: -4.599999904632568
share_prediction_tower: true
use_depthwise: true
}
}
anchor_generator {
multiscale_anchor_generator {
min_level: 3
max_level: 7
anchor_scale: 4.0
aspect_ratios: 1.0
aspect_ratios: 2.0
aspect_ratios: 0.5
scales_per_octave: 2
}
}
post_processing {
batch_non_max_suppression {
score_threshold: 9.99999993922529e-09
iou_threshold: 0.6000000238418579
max_detections_per_class: 100
max_total_detections: 100
use_static_shapes: false
}
score_converter: SIGMOID
}
normalize_loss_by_num_matches: true
loss {
localization_loss {
weighted_smooth_l1 {
}
}
classification_loss {
weighted_sigmoid_focal {
gamma: 2.0
alpha: 0.25
}
}
classification_weight: 1.0
localization_weight: 1.0
}
encode_background_as_zeros: true
normalize_loc_loss_by_codesize: true
inplace_batchnorm_update: true
freeze_batchnorm: false
}
}
train_config {
batch_size: 4
sync_replicas: true
optimizer {
momentum_optimizer {
learning_rate {
cosine_decay_learning_rate {
learning_rate_base: 0.07999999821186066
total_steps: 50000
warmup_learning_rate: 0.026666000485420227
warmup_steps: 1000
}
}
momentum_optimizer_value: 0.8999999761581421
}
use_moving_average: false
}
fine_tune_checkpoint: "/content/Tensorflow/pre-trained-models/ssd_mobilenet_v2_fpnlite_640x640_coco17_tpu-8/checkpoint/ckpt-0"
num_steps: 50000
startup_delay_steps: 0.0
replicas_to_aggregate: 8
max_number_of_boxes: 100
unpad_groundtruth_tensors: false
fine_tune_checkpoint_type: "detection"
fine_tune_checkpoint_version: V2
}
train_input_reader {
label_map_path: "/content/Tensorflow/training/annotations/label_map.pbtxt"
tf_record_input_reader {
input_path: "/content/Tensorflow/training/annotations/train.record"
}
}
eval_config {
metrics_set: "coco_detection_metrics"
use_moving_averages: false
}
eval_input_reader {
label_map_path: "/content/Tensorflow/training/annotations/label_map.pbtxt"
shuffle: false
num_epochs: 1
tf_record_input_reader {
input_path: "/content/Tensorflow/training/annotations/valid.record"
}
}
# Serialize the edited pipeline proto back to text format and overwrite the
# config file on disk.
config_text = text_format.MessageToString(pipeline_config)
# Open in text mode ("w"): MessageToString returns a str, and the original
# wrote it through a file opened "wb", relying on GFile's implicit encoding
# of str data in binary mode.
with tf.io.gfile.GFile(PIPELINE_CONFIG, "w") as f:
    f.write(config_text)
# Set number of training steps
TRAINING_STEPS=5000
# Run the training script to finetune model
TRAINING_SCRIPT = "/content/Tensorflow/models/research/object_detection/model_main_tf2.py"
MODEL_DIR = f"/content/Tensorflow/training/models/{MODEL_NAME}"
command = f"python {TRAINING_SCRIPT} --model_dir={MODEL_DIR} --pipeline_config_path={PIPELINE_CONFIG} --num_train_steps={TRAINING_STEPS}"
!{command}
2023-07-23 20:37:06.598169: W tensorflow/compiler/tf2tensorrt/utils/py_utils.cc:38] TF-TRT Warning: Could not find TensorRT
/usr/local/lib/python3.10/dist-packages/tensorflow_io/python/ops/__init__.py:98: UserWarning: unable to load libtensorflow_io_plugins.so: unable to open file: libtensorflow_io_plugins.so, from paths: ['/usr/local/lib/python3.10/dist-packages/tensorflow_io/python/ops/libtensorflow_io_plugins.so']
caused by: ['/usr/local/lib/python3.10/dist-packages/tensorflow_io/python/ops/libtensorflow_io_plugins.so: undefined symbol: _ZN3tsl6Status12empty_stringB5cxx11Ev']
warnings.warn(f"unable to load libtensorflow_io_plugins.so: {e}")
/usr/local/lib/python3.10/dist-packages/tensorflow_io/python/ops/__init__.py:104: UserWarning: file system plugins are not loaded: unable to open file: libtensorflow_io.so, from paths: ['/usr/local/lib/python3.10/dist-packages/tensorflow_io/python/ops/libtensorflow_io.so']
caused by: ['/usr/local/lib/python3.10/dist-packages/tensorflow_io/python/ops/libtensorflow_io.so: undefined symbol: _ZNK10tensorflow4data11DatasetBase8FinalizeEPNS_15OpKernelContextESt8functionIFN3tsl8StatusOrISt10unique_ptrIS1_NS5_4core15RefCountDeleterEEEEvEE']
warnings.warn(f"file system plugins are not loaded: {e}")
2023-07-23 20:37:10.523128: W tensorflow/core/common_runtime/gpu/gpu_bfc_allocator.cc:47] Overriding orig_value setting because the TF_FORCE_GPU_ALLOW_GROWTH environment variable is set. Original config value was 0.
INFO:tensorflow:Using MirroredStrategy with devices ('/job:localhost/replica:0/task:0/device:GPU:0',)
I0723 20:37:10.524252 140626572066816 mirrored_strategy.py:419] Using MirroredStrategy with devices ('/job:localhost/replica:0/task:0/device:GPU:0',)
INFO:tensorflow:Maybe overwriting train_steps: 5000
I0723 20:37:10.550157 140626572066816 config_util.py:552] Maybe overwriting train_steps: 5000
INFO:tensorflow:Maybe overwriting use_bfloat16: False
I0723 20:37:10.550368 140626572066816 config_util.py:552] Maybe overwriting use_bfloat16: False
WARNING:tensorflow:From /usr/local/lib/python3.10/dist-packages/object_detection/model_lib_v2.py:563: StrategyBase.experimental_distribute_datasets_from_function (from tensorflow.python.distribute.distribute_lib) is deprecated and will be removed in a future version.
Instructions for updating:
rename to distribute_datasets_from_function
W0723 20:37:10.723247 140626572066816 deprecation.py:364] From /usr/local/lib/python3.10/dist-packages/object_detection/model_lib_v2.py:563: StrategyBase.experimental_distribute_datasets_from_function (from tensorflow.python.distribute.distribute_lib) is deprecated and will be removed in a future version.
Instructions for updating:
rename to distribute_datasets_from_function
INFO:tensorflow:Reading unweighted datasets: ['/content/Tensorflow/training/annotations/train.record']
I0723 20:37:10.731438 140626572066816 dataset_builder.py:162] Reading unweighted datasets: ['/content/Tensorflow/training/annotations/train.record']
INFO:tensorflow:Reading record datasets for input file: ['/content/Tensorflow/training/annotations/train.record']
I0723 20:37:10.731647 140626572066816 dataset_builder.py:79] Reading record datasets for input file: ['/content/Tensorflow/training/annotations/train.record']
INFO:tensorflow:Number of filenames to read: 1
I0723 20:37:10.731755 140626572066816 dataset_builder.py:80] Number of filenames to read: 1
WARNING:tensorflow:num_readers has been reduced to 1 to match input file shards.
W0723 20:37:10.731831 140626572066816 dataset_builder.py:86] num_readers has been reduced to 1 to match input file shards.
WARNING:tensorflow:From /usr/local/lib/python3.10/dist-packages/object_detection/builders/dataset_builder.py:100: parallel_interleave (from tensorflow.python.data.experimental.ops.interleave_ops) is deprecated and will be removed in a future version.
Instructions for updating:
Use `tf.data.Dataset.interleave(map_func, cycle_length, block_length, num_parallel_calls=tf.data.AUTOTUNE)` instead. If sloppy execution is desired, use `tf.data.Options.deterministic`.
W0723 20:37:10.740601 140626572066816 deprecation.py:364] From /usr/local/lib/python3.10/dist-packages/object_detection/builders/dataset_builder.py:100: parallel_interleave (from tensorflow.python.data.experimental.ops.interleave_ops) is deprecated and will be removed in a future version.
Instructions for updating:
Use `tf.data.Dataset.interleave(map_func, cycle_length, block_length, num_parallel_calls=tf.data.AUTOTUNE)` instead. If sloppy execution is desired, use `tf.data.Options.deterministic`.
WARNING:tensorflow:From /usr/local/lib/python3.10/dist-packages/object_detection/builders/dataset_builder.py:235: DatasetV1.map_with_legacy_function (from tensorflow.python.data.ops.dataset_ops) is deprecated and will be removed in a future version.
Instructions for updating:
Use `tf.data.Dataset.map()
W0723 20:37:10.759402 140626572066816 deprecation.py:364] From /usr/local/lib/python3.10/dist-packages/object_detection/builders/dataset_builder.py:235: DatasetV1.map_with_legacy_function (from tensorflow.python.data.ops.dataset_ops) is deprecated and will be removed in a future version.
Instructions for updating:
Use `tf.data.Dataset.map()
WARNING:tensorflow:From /usr/local/lib/python3.10/dist-packages/tensorflow/python/util/dispatch.py:1176: sparse_to_dense (from tensorflow.python.ops.sparse_ops) is deprecated and will be removed in a future version.
Instructions for updating:
Create a `tf.sparse.SparseTensor` and use `tf.sparse.to_dense` instead.
W0723 20:37:14.690590 140626572066816 deprecation.py:364] From /usr/local/lib/python3.10/dist-packages/tensorflow/python/util/dispatch.py:1176: sparse_to_dense (from tensorflow.python.ops.sparse_ops) is deprecated and will be removed in a future version.
Instructions for updating:
Create a `tf.sparse.SparseTensor` and use `tf.sparse.to_dense` instead.
WARNING:tensorflow:From /usr/local/lib/python3.10/dist-packages/tensorflow/python/util/dispatch.py:1176: to_float (from tensorflow.python.ops.math_ops) is deprecated and will be removed in a future version.
Instructions for updating:
Use `tf.cast` instead.
W0723 20:37:18.175598 140626572066816 deprecation.py:364] From /usr/local/lib/python3.10/dist-packages/tensorflow/python/util/dispatch.py:1176: to_float (from tensorflow.python.ops.math_ops) is deprecated and will be removed in a future version.
Instructions for updating:
Use `tf.cast` instead.
/usr/local/lib/python3.10/dist-packages/keras/src/backend.py:452: UserWarning: `tf.keras.backend.set_learning_phase` is deprecated and will be removed after 2020-10-11. To update it, simply pass a True/False value to the `training` argument of the `__call__` method of your layer or model.
warnings.warn(
I0723 20:37:29.609728 140621826209344 api.py:460] feature_map_spatial_dims: [(80, 80), (40, 40), (20, 20), (10, 10), (5, 5)]
I0723 20:37:39.044544 140621826209344 api.py:460] feature_map_spatial_dims: [(80, 80), (40, 40), (20, 20), (10, 10), (5, 5)]
2023-07-23 20:37:43.572690: W tensorflow/tsl/framework/cpu_allocator_impl.cc:83] Allocation of 20460000 exceeds 10% of free system memory.
2023-07-23 20:37:44.229580: W tensorflow/tsl/framework/cpu_allocator_impl.cc:83] Allocation of 20460000 exceeds 10% of free system memory.
2023-07-23 20:37:44.248190: W tensorflow/tsl/framework/cpu_allocator_impl.cc:83] Allocation of 20460000 exceeds 10% of free system memory.
2023-07-23 20:37:44.280742: W tensorflow/tsl/framework/cpu_allocator_impl.cc:83] Allocation of 20460000 exceeds 10% of free system memory.
2023-07-23 20:37:44.299584: W tensorflow/tsl/framework/cpu_allocator_impl.cc:83] Allocation of 20460000 exceeds 10% of free system memory.
INFO:tensorflow:Reduce to /job:localhost/replica:0/task:0/device:CPU:0 then broadcast to ('/job:localhost/replica:0/task:0/device:CPU:0',).
I0723 20:37:53.854150 140626572066816 cross_device_ops.py:617] Reduce to /job:localhost/replica:0/task:0/device:CPU:0 then broadcast to ('/job:localhost/replica:0/task:0/device:CPU:0',).
INFO:tensorflow:Reduce to /job:localhost/replica:0/task:0/device:CPU:0 then broadcast to ('/job:localhost/replica:0/task:0/device:CPU:0',).
I0723 20:37:53.856749 140626572066816 cross_device_ops.py:617] Reduce to /job:localhost/replica:0/task:0/device:CPU:0 then broadcast to ('/job:localhost/replica:0/task:0/device:CPU:0',).
INFO:tensorflow:Reduce to /job:localhost/replica:0/task:0/device:CPU:0 then broadcast to ('/job:localhost/replica:0/task:0/device:CPU:0',).
I0723 20:37:53.857783 140626572066816 cross_device_ops.py:617] Reduce to /job:localhost/replica:0/task:0/device:CPU:0 then broadcast to ('/job:localhost/replica:0/task:0/device:CPU:0',).
INFO:tensorflow:Reduce to /job:localhost/replica:0/task:0/device:CPU:0 then broadcast to ('/job:localhost/replica:0/task:0/device:CPU:0',).
I0723 20:37:53.858721 140626572066816 cross_device_ops.py:617] Reduce to /job:localhost/replica:0/task:0/device:CPU:0 then broadcast to ('/job:localhost/replica:0/task:0/device:CPU:0',).
INFO:tensorflow:Reduce to /job:localhost/replica:0/task:0/device:CPU:0 then broadcast to ('/job:localhost/replica:0/task:0/device:CPU:0',).
I0723 20:37:53.862453 140626572066816 cross_device_ops.py:617] Reduce to /job:localhost/replica:0/task:0/device:CPU:0 then broadcast to ('/job:localhost/replica:0/task:0/device:CPU:0',).
INFO:tensorflow:Reduce to /job:localhost/replica:0/task:0/device:CPU:0 then broadcast to ('/job:localhost/replica:0/task:0/device:CPU:0',).
I0723 20:37:53.863305 140626572066816 cross_device_ops.py:617] Reduce to /job:localhost/replica:0/task:0/device:CPU:0 then broadcast to ('/job:localhost/replica:0/task:0/device:CPU:0',).
INFO:tensorflow:Reduce to /job:localhost/replica:0/task:0/device:CPU:0 then broadcast to ('/job:localhost/replica:0/task:0/device:CPU:0',).
I0723 20:37:53.864230 140626572066816 cross_device_ops.py:617] Reduce to /job:localhost/replica:0/task:0/device:CPU:0 then broadcast to ('/job:localhost/replica:0/task:0/device:CPU:0',).
INFO:tensorflow:Reduce to /job:localhost/replica:0/task:0/device:CPU:0 then broadcast to ('/job:localhost/replica:0/task:0/device:CPU:0',).
I0723 20:37:53.865186 140626572066816 cross_device_ops.py:617] Reduce to /job:localhost/replica:0/task:0/device:CPU:0 then broadcast to ('/job:localhost/replica:0/task:0/device:CPU:0',).
INFO:tensorflow:Reduce to /job:localhost/replica:0/task:0/device:CPU:0 then broadcast to ('/job:localhost/replica:0/task:0/device:CPU:0',).
I0723 20:37:53.870879 140626572066816 cross_device_ops.py:617] Reduce to /job:localhost/replica:0/task:0/device:CPU:0 then broadcast to ('/job:localhost/replica:0/task:0/device:CPU:0',).
INFO:tensorflow:Reduce to /job:localhost/replica:0/task:0/device:CPU:0 then broadcast to ('/job:localhost/replica:0/task:0/device:CPU:0',).
I0723 20:37:53.871886 140626572066816 cross_device_ops.py:617] Reduce to /job:localhost/replica:0/task:0/device:CPU:0 then broadcast to ('/job:localhost/replica:0/task:0/device:CPU:0',).
WARNING:tensorflow:From /usr/local/lib/python3.10/dist-packages/tensorflow/python/util/deprecation.py:648: calling map_fn_v2 (from tensorflow.python.ops.map_fn) with dtype is deprecated and will be removed in a future version.
Instructions for updating:
Use fn_output_signature instead
W0723 20:37:55.100840 140621834602048 deprecation.py:569] From /usr/local/lib/python3.10/dist-packages/tensorflow/python/util/deprecation.py:648: calling map_fn_v2 (from tensorflow.python.ops.map_fn) with dtype is deprecated and will be removed in a future version.
Instructions for updating:
Use fn_output_signature instead
I0723 20:37:56.579996 140621834602048 api.py:460] feature_map_spatial_dims: [(80, 80), (40, 40), (20, 20), (10, 10), (5, 5)]
I0723 20:38:04.007960 140621834602048 api.py:460] feature_map_spatial_dims: [(80, 80), (40, 40), (20, 20), (10, 10), (5, 5)]
I0723 20:38:10.766422 140621834602048 api.py:460] feature_map_spatial_dims: [(80, 80), (40, 40), (20, 20), (10, 10), (5, 5)]
I0723 20:38:15.955888 140621834602048 api.py:460] feature_map_spatial_dims: [(80, 80), (40, 40), (20, 20), (10, 10), (5, 5)]
INFO:tensorflow:Step 100 per-step time 0.615s
I0723 20:38:56.319948 140626572066816 model_lib_v2.py:705] Step 100 per-step time 0.615s
INFO:tensorflow:{'Loss/classification_loss': 0.51440316,
'Loss/localization_loss': 0.28139538,
'Loss/regularization_loss': 0.15212226,
'Loss/total_loss': 0.9479208,
'learning_rate': 0.0319994}
I0723 20:38:56.320372 140626572066816 model_lib_v2.py:708] {'Loss/classification_loss': 0.51440316,
'Loss/localization_loss': 0.28139538,
'Loss/regularization_loss': 0.15212226,
'Loss/total_loss': 0.9479208,
'learning_rate': 0.0319994}
INFO:tensorflow:Step 200 per-step time 0.217s
I0723 20:39:17.979282 140626572066816 model_lib_v2.py:705] Step 200 per-step time 0.217s
INFO:tensorflow:{'Loss/classification_loss': 0.24718821,
'Loss/localization_loss': 0.18433109,
'Loss/regularization_loss': 0.15225907,
'Loss/total_loss': 0.5837784,
'learning_rate': 0.0373328}
I0723 20:39:17.979578 140626572066816 model_lib_v2.py:708] {'Loss/classification_loss': 0.24718821,
'Loss/localization_loss': 0.18433109,
'Loss/regularization_loss': 0.15225907,
'Loss/total_loss': 0.5837784,
'learning_rate': 0.0373328}
INFO:tensorflow:Step 300 per-step time 0.212s
I0723 20:39:39.140290 140626572066816 model_lib_v2.py:705] Step 300 per-step time 0.212s
INFO:tensorflow:{'Loss/classification_loss': 0.18069258,
'Loss/localization_loss': 0.11041847,
'Loss/regularization_loss': 0.15235808,
'Loss/total_loss': 0.44346914,
'learning_rate': 0.0426662}
I0723 20:39:39.140685 140626572066816 model_lib_v2.py:708] {'Loss/classification_loss': 0.18069258,
'Loss/localization_loss': 0.11041847,
'Loss/regularization_loss': 0.15235808,
'Loss/total_loss': 0.44346914,
'learning_rate': 0.0426662}
INFO:tensorflow:Step 400 per-step time 0.215s
I0723 20:40:00.599554 140626572066816 model_lib_v2.py:705] Step 400 per-step time 0.215s
INFO:tensorflow:{'Loss/classification_loss': 0.13415363,
'Loss/localization_loss': 0.14021136,
'Loss/regularization_loss': 0.15226911,
'Loss/total_loss': 0.42663413,
'learning_rate': 0.047999598}
I0723 20:40:00.599951 140626572066816 model_lib_v2.py:708] {'Loss/classification_loss': 0.13415363,
'Loss/localization_loss': 0.14021136,
'Loss/regularization_loss': 0.15226911,
'Loss/total_loss': 0.42663413,
'learning_rate': 0.047999598}
INFO:tensorflow:Step 500 per-step time 0.215s
I0723 20:40:22.074664 140626572066816 model_lib_v2.py:705] Step 500 per-step time 0.215s
INFO:tensorflow:{'Loss/classification_loss': 0.19701691,
'Loss/localization_loss': 0.11526527,
'Loss/regularization_loss': 0.15218449,
'Loss/total_loss': 0.46446666,
'learning_rate': 0.053333}
I0723 20:40:22.075246 140626572066816 model_lib_v2.py:708] {'Loss/classification_loss': 0.19701691,
'Loss/localization_loss': 0.11526527,
'Loss/regularization_loss': 0.15218449,
'Loss/total_loss': 0.46446666,
'learning_rate': 0.053333}
INFO:tensorflow:Step 600 per-step time 0.213s
I0723 20:40:43.351082 140626572066816 model_lib_v2.py:705] Step 600 per-step time 0.213s
INFO:tensorflow:{'Loss/classification_loss': 0.15630332,
'Loss/localization_loss': 0.06089431,
'Loss/regularization_loss': 0.15216726,
'Loss/total_loss': 0.3693649,
'learning_rate': 0.0586664}
I0723 20:40:43.351499 140626572066816 model_lib_v2.py:708] {'Loss/classification_loss': 0.15630332,
'Loss/localization_loss': 0.06089431,
'Loss/regularization_loss': 0.15216726,
'Loss/total_loss': 0.3693649,
'learning_rate': 0.0586664}
INFO:tensorflow:Step 700 per-step time 0.215s
I0723 20:41:04.811380 140626572066816 model_lib_v2.py:705] Step 700 per-step time 0.215s
INFO:tensorflow:{'Loss/classification_loss': 0.18464486,
'Loss/localization_loss': 0.08743937,
'Loss/regularization_loss': 0.15242355,
'Loss/total_loss': 0.4245078,
'learning_rate': 0.0639998}
I0723 20:41:04.811739 140626572066816 model_lib_v2.py:708] {'Loss/classification_loss': 0.18464486,
'Loss/localization_loss': 0.08743937,
'Loss/regularization_loss': 0.15242355,
'Loss/total_loss': 0.4245078,
'learning_rate': 0.0639998}
INFO:tensorflow:Step 800 per-step time 0.216s
I0723 20:41:26.424998 140626572066816 model_lib_v2.py:705] Step 800 per-step time 0.216s
INFO:tensorflow:{'Loss/classification_loss': 0.19456601,
'Loss/localization_loss': 0.07052023,
'Loss/regularization_loss': 0.15225872,
'Loss/total_loss': 0.41734496,
'learning_rate': 0.069333196}
I0723 20:41:26.425402 140626572066816 model_lib_v2.py:708] {'Loss/classification_loss': 0.19456601,
'Loss/localization_loss': 0.07052023,
'Loss/regularization_loss': 0.15225872,
'Loss/total_loss': 0.41734496,
'learning_rate': 0.069333196}
INFO:tensorflow:Step 900 per-step time 0.214s
I0723 20:41:47.794065 140626572066816 model_lib_v2.py:705] Step 900 per-step time 0.214s
INFO:tensorflow:{'Loss/classification_loss': 0.14601721,
'Loss/localization_loss': 0.1250894,
'Loss/regularization_loss': 0.1519832,
'Loss/total_loss': 0.4230898,
'learning_rate': 0.074666604}
I0723 20:41:47.794632 140626572066816 model_lib_v2.py:708] {'Loss/classification_loss': 0.14601721,
'Loss/localization_loss': 0.1250894,
'Loss/regularization_loss': 0.1519832,
'Loss/total_loss': 0.4230898,
'learning_rate': 0.074666604}
INFO:tensorflow:Step 1000 per-step time 0.213s
I0723 20:42:09.070731 140626572066816 model_lib_v2.py:705] Step 1000 per-step time 0.213s
INFO:tensorflow:{'Loss/classification_loss': 0.0730916,
'Loss/localization_loss': 0.025868872,
'Loss/regularization_loss': 0.15157682,
'Loss/total_loss': 0.25053728,
'learning_rate': 0.08}
I0723 20:42:09.071125 140626572066816 model_lib_v2.py:708] {'Loss/classification_loss': 0.0730916,
'Loss/localization_loss': 0.025868872,
'Loss/regularization_loss': 0.15157682,
'Loss/total_loss': 0.25053728,
'learning_rate': 0.08}
INFO:tensorflow:Step 1100 per-step time 0.233s
I0723 20:42:32.342074 140626572066816 model_lib_v2.py:705] Step 1100 per-step time 0.233s
INFO:tensorflow:{'Loss/classification_loss': 0.12552223,
'Loss/localization_loss': 0.072278656,
'Loss/regularization_loss': 0.15152097,
'Loss/total_loss': 0.34932184,
'learning_rate': 0.07999918}
I0723 20:42:32.342524 140626572066816 model_lib_v2.py:708] {'Loss/classification_loss': 0.12552223,
'Loss/localization_loss': 0.072278656,
'Loss/regularization_loss': 0.15152097,
'Loss/total_loss': 0.34932184,
'learning_rate': 0.07999918}
INFO:tensorflow:Step 1200 per-step time 0.214s
I0723 20:42:53.775443 140626572066816 model_lib_v2.py:705] Step 1200 per-step time 0.214s
INFO:tensorflow:{'Loss/classification_loss': 0.1024717,
'Loss/localization_loss': 0.05249763,
'Loss/regularization_loss': 0.15154642,
'Loss/total_loss': 0.30651575,
'learning_rate': 0.079996705}
I0723 20:42:53.775822 140626572066816 model_lib_v2.py:708] {'Loss/classification_loss': 0.1024717,
'Loss/localization_loss': 0.05249763,
'Loss/regularization_loss': 0.15154642,
'Loss/total_loss': 0.30651575,
'learning_rate': 0.079996705}
INFO:tensorflow:Step 1300 per-step time 0.212s
I0723 20:43:15.006775 140626572066816 model_lib_v2.py:705] Step 1300 per-step time 0.212s
INFO:tensorflow:{'Loss/classification_loss': 0.14109737,
'Loss/localization_loss': 0.044711865,
'Loss/regularization_loss': 0.1511576,
'Loss/total_loss': 0.3369668,
'learning_rate': 0.0799926}
I0723 20:43:15.007120 140626572066816 model_lib_v2.py:708] {'Loss/classification_loss': 0.14109737,
'Loss/localization_loss': 0.044711865,
'Loss/regularization_loss': 0.1511576,
'Loss/total_loss': 0.3369668,
'learning_rate': 0.0799926}
INFO:tensorflow:Step 1400 per-step time 0.215s
I0723 20:43:36.488816 140626572066816 model_lib_v2.py:705] Step 1400 per-step time 0.215s
INFO:tensorflow:{'Loss/classification_loss': 0.1603307,
'Loss/localization_loss': 0.07609439,
'Loss/regularization_loss': 0.15076539,
'Loss/total_loss': 0.38719046,
'learning_rate': 0.07998685}
I0723 20:43:36.489177 140626572066816 model_lib_v2.py:708] {'Loss/classification_loss': 0.1603307,
'Loss/localization_loss': 0.07609439,
'Loss/regularization_loss': 0.15076539,
'Loss/total_loss': 0.38719046,
'learning_rate': 0.07998685}
INFO:tensorflow:Step 1500 per-step time 0.215s
I0723 20:43:57.949393 140626572066816 model_lib_v2.py:705] Step 1500 per-step time 0.215s
INFO:tensorflow:{'Loss/classification_loss': 0.096087426,
'Loss/localization_loss': 0.06868634,
'Loss/regularization_loss': 0.15017423,
'Loss/total_loss': 0.314948,
'learning_rate': 0.07997945}
I0723 20:43:57.949732 140626572066816 model_lib_v2.py:708] {'Loss/classification_loss': 0.096087426,
'Loss/localization_loss': 0.06868634,
'Loss/regularization_loss': 0.15017423,
'Loss/total_loss': 0.314948,
'learning_rate': 0.07997945}
INFO:tensorflow:Step 1600 per-step time 0.212s
I0723 20:44:19.160944 140626572066816 model_lib_v2.py:705] Step 1600 per-step time 0.212s
INFO:tensorflow:{'Loss/classification_loss': 0.1322641,
'Loss/localization_loss': 0.046820853,
'Loss/regularization_loss': 0.14974554,
'Loss/total_loss': 0.32883048,
'learning_rate': 0.079970405}
I0723 20:44:19.161285 140626572066816 model_lib_v2.py:708] {'Loss/classification_loss': 0.1322641,
'Loss/localization_loss': 0.046820853,
'Loss/regularization_loss': 0.14974554,
'Loss/total_loss': 0.32883048,
'learning_rate': 0.079970405}
INFO:tensorflow:Step 1700 per-step time 0.212s
I0723 20:44:40.349589 140626572066816 model_lib_v2.py:705] Step 1700 per-step time 0.212s
INFO:tensorflow:{'Loss/classification_loss': 0.12944028,
'Loss/localization_loss': 0.05551131,
'Loss/regularization_loss': 0.14934602,
'Loss/total_loss': 0.3342976,
'learning_rate': 0.07995972}
I0723 20:44:40.349896 140626572066816 model_lib_v2.py:708] {'Loss/classification_loss': 0.12944028,
'Loss/localization_loss': 0.05551131,
'Loss/regularization_loss': 0.14934602,
'Loss/total_loss': 0.3342976,
'learning_rate': 0.07995972}
INFO:tensorflow:Step 1800 per-step time 0.212s
I0723 20:45:01.582890 140626572066816 model_lib_v2.py:705] Step 1800 per-step time 0.212s
INFO:tensorflow:{'Loss/classification_loss': 0.13259953,
'Loss/localization_loss': 0.081035905,
'Loss/regularization_loss': 0.14881085,
'Loss/total_loss': 0.3624463,
'learning_rate': 0.0799474}
I0723 20:45:01.583213 140626572066816 model_lib_v2.py:708] {'Loss/classification_loss': 0.13259953,
'Loss/localization_loss': 0.081035905,
'Loss/regularization_loss': 0.14881085,
'Loss/total_loss': 0.3624463,
'learning_rate': 0.0799474}
INFO:tensorflow:Step 1900 per-step time 0.213s
I0723 20:45:22.913830 140626572066816 model_lib_v2.py:705] Step 1900 per-step time 0.213s
INFO:tensorflow:{'Loss/classification_loss': 0.20807305,
'Loss/localization_loss': 0.09191676,
'Loss/regularization_loss': 0.14862858,
'Loss/total_loss': 0.4486184,
'learning_rate': 0.07993342}
I0723 20:45:22.914112 140626572066816 model_lib_v2.py:708] {'Loss/classification_loss': 0.20807305,
'Loss/localization_loss': 0.09191676,
'Loss/regularization_loss': 0.14862858,
'Loss/total_loss': 0.4486184,
'learning_rate': 0.07993342}
INFO:tensorflow:Step 2000 per-step time 0.211s
I0723 20:45:43.967232 140626572066816 model_lib_v2.py:705] Step 2000 per-step time 0.211s
INFO:tensorflow:{'Loss/classification_loss': 0.065590516,
'Loss/localization_loss': 0.039865423,
'Loss/regularization_loss': 0.14833932,
'Loss/total_loss': 0.25379527,
'learning_rate': 0.07991781}
I0723 20:45:43.967638 140626572066816 model_lib_v2.py:708] {'Loss/classification_loss': 0.065590516,
'Loss/localization_loss': 0.039865423,
'Loss/regularization_loss': 0.14833932,
'Loss/total_loss': 0.25379527,
'learning_rate': 0.07991781}
INFO:tensorflow:Step 2100 per-step time 0.233s
I0723 20:46:07.276439 140626572066816 model_lib_v2.py:705] Step 2100 per-step time 0.233s
INFO:tensorflow:{'Loss/classification_loss': 0.07048755,
'Loss/localization_loss': 0.03492248,
'Loss/regularization_loss': 0.14779572,
'Loss/total_loss': 0.25320575,
'learning_rate': 0.07990056}
I0723 20:46:07.276792 140626572066816 model_lib_v2.py:708] {'Loss/classification_loss': 0.07048755,
'Loss/localization_loss': 0.03492248,
'Loss/regularization_loss': 0.14779572,
'Loss/total_loss': 0.25320575,
'learning_rate': 0.07990056}
INFO:tensorflow:Step 2200 per-step time 0.213s
I0723 20:46:28.614902 140626572066816 model_lib_v2.py:705] Step 2200 per-step time 0.213s
INFO:tensorflow:{'Loss/classification_loss': 0.07064696,
'Loss/localization_loss': 0.032722306,
'Loss/regularization_loss': 0.14712417,
'Loss/total_loss': 0.25049344,
'learning_rate': 0.07988167}
I0723 20:46:28.615194 140626572066816 model_lib_v2.py:708] {'Loss/classification_loss': 0.07064696,
'Loss/localization_loss': 0.032722306,
'Loss/regularization_loss': 0.14712417,
'Loss/total_loss': 0.25049344,
'learning_rate': 0.07988167}
INFO:tensorflow:Step 2300 per-step time 0.214s
I0723 20:46:50.047530 140626572066816 model_lib_v2.py:705] Step 2300 per-step time 0.214s
INFO:tensorflow:{'Loss/classification_loss': 0.12946023,
'Loss/localization_loss': 0.06818632,
'Loss/regularization_loss': 0.14645767,
'Loss/total_loss': 0.34410423,
'learning_rate': 0.07986114}
I0723 20:46:50.047919 140626572066816 model_lib_v2.py:708] {'Loss/classification_loss': 0.12946023,
'Loss/localization_loss': 0.06818632,
'Loss/regularization_loss': 0.14645767,
'Loss/total_loss': 0.34410423,
'learning_rate': 0.07986114}
INFO:tensorflow:Step 2400 per-step time 0.213s
I0723 20:47:11.364598 140626572066816 model_lib_v2.py:705] Step 2400 per-step time 0.213s
INFO:tensorflow:{'Loss/classification_loss': 0.066845894,
'Loss/localization_loss': 0.047452338,
'Loss/regularization_loss': 0.14574118,
'Loss/total_loss': 0.26003942,
'learning_rate': 0.07983897}
I0723 20:47:11.365001 140626572066816 model_lib_v2.py:708] {'Loss/classification_loss': 0.066845894,
'Loss/localization_loss': 0.047452338,
'Loss/regularization_loss': 0.14574118,
'Loss/total_loss': 0.26003942,
'learning_rate': 0.07983897}
INFO:tensorflow:Step 2500 per-step time 0.214s
I0723 20:47:32.770769 140626572066816 model_lib_v2.py:705] Step 2500 per-step time 0.214s
INFO:tensorflow:{'Loss/classification_loss': 0.056935947,
'Loss/localization_loss': 0.043007232,
'Loss/regularization_loss': 0.14505404,
'Loss/total_loss': 0.24499722,
'learning_rate': 0.079815164}
I0723 20:47:32.771111 140626572066816 model_lib_v2.py:708] {'Loss/classification_loss': 0.056935947,
'Loss/localization_loss': 0.043007232,
'Loss/regularization_loss': 0.14505404,
'Loss/total_loss': 0.24499722,
'learning_rate': 0.079815164}
INFO:tensorflow:Step 2600 per-step time 0.213s
I0723 20:47:54.082281 140626572066816 model_lib_v2.py:705] Step 2600 per-step time 0.213s
INFO:tensorflow:{'Loss/classification_loss': 0.07514197,
'Loss/localization_loss': 0.035738807,
'Loss/regularization_loss': 0.14433889,
'Loss/total_loss': 0.25521967,
'learning_rate': 0.07978972}
I0723 20:47:54.082704 140626572066816 model_lib_v2.py:708] {'Loss/classification_loss': 0.07514197,
'Loss/localization_loss': 0.035738807,
'Loss/regularization_loss': 0.14433889,
'Loss/total_loss': 0.25521967,
'learning_rate': 0.07978972}
INFO:tensorflow:Step 2700 per-step time 0.215s
I0723 20:48:15.589483 140626572066816 model_lib_v2.py:705] Step 2700 per-step time 0.215s
INFO:tensorflow:{'Loss/classification_loss': 0.040342115,
'Loss/localization_loss': 0.00990196,
'Loss/regularization_loss': 0.1436622,
'Loss/total_loss': 0.19390628,
'learning_rate': 0.07976264}
I0723 20:48:15.589788 140626572066816 model_lib_v2.py:708] {'Loss/classification_loss': 0.040342115,
'Loss/localization_loss': 0.00990196,
'Loss/regularization_loss': 0.1436622,
'Loss/total_loss': 0.19390628,
'learning_rate': 0.07976264}
INFO:tensorflow:Step 2800 per-step time 0.215s
I0723 20:48:37.109645 140626572066816 model_lib_v2.py:705] Step 2800 per-step time 0.215s
INFO:tensorflow:{'Loss/classification_loss': 0.029220097,
'Loss/localization_loss': 0.009013002,
'Loss/regularization_loss': 0.14291187,
'Loss/total_loss': 0.18114497,
'learning_rate': 0.07973392}
I0723 20:48:37.110089 140626572066816 model_lib_v2.py:708] {'Loss/classification_loss': 0.029220097,
'Loss/localization_loss': 0.009013002,
'Loss/regularization_loss': 0.14291187,
'Loss/total_loss': 0.18114497,
'learning_rate': 0.07973392}
INFO:tensorflow:Step 2900 per-step time 0.212s
I0723 20:48:58.341233 140626572066816 model_lib_v2.py:705] Step 2900 per-step time 0.212s
INFO:tensorflow:{'Loss/classification_loss': 0.12718447,
'Loss/localization_loss': 0.08292331,
'Loss/regularization_loss': 0.14212796,
'Loss/total_loss': 0.35223573,
'learning_rate': 0.07970358}
I0723 20:48:58.341670 140626572066816 model_lib_v2.py:708] {'Loss/classification_loss': 0.12718447,
'Loss/localization_loss': 0.08292331,
'Loss/regularization_loss': 0.14212796,
'Loss/total_loss': 0.35223573,
'learning_rate': 0.07970358}
INFO:tensorflow:Step 3000 per-step time 0.214s
I0723 20:49:19.721647 140626572066816 model_lib_v2.py:705] Step 3000 per-step time 0.214s
INFO:tensorflow:{'Loss/classification_loss': 0.05248206,
'Loss/localization_loss': 0.05929486,
'Loss/regularization_loss': 0.14133215,
'Loss/total_loss': 0.25310907,
'learning_rate': 0.0796716}
I0723 20:49:19.722021 140626572066816 model_lib_v2.py:708] {'Loss/classification_loss': 0.05248206,
'Loss/localization_loss': 0.05929486,
'Loss/regularization_loss': 0.14133215,
'Loss/total_loss': 0.25310907,
'learning_rate': 0.0796716}
INFO:tensorflow:Step 3100 per-step time 0.233s
I0723 20:49:42.980041 140626572066816 model_lib_v2.py:705] Step 3100 per-step time 0.233s
INFO:tensorflow:{'Loss/classification_loss': 0.02787326,
'Loss/localization_loss': 0.015421426,
'Loss/regularization_loss': 0.14057848,
'Loss/total_loss': 0.18387316,
'learning_rate': 0.07963799}
I0723 20:49:42.980445 140626572066816 model_lib_v2.py:708] {'Loss/classification_loss': 0.02787326,
'Loss/localization_loss': 0.015421426,
'Loss/regularization_loss': 0.14057848,
'Loss/total_loss': 0.18387316,
'learning_rate': 0.07963799}
INFO:tensorflow:Step 3200 per-step time 0.214s
I0723 20:50:04.361235 140626572066816 model_lib_v2.py:705] Step 3200 per-step time 0.214s
INFO:tensorflow:{'Loss/classification_loss': 0.051457774,
'Loss/localization_loss': 0.037896447,
'Loss/regularization_loss': 0.13984938,
'Loss/total_loss': 0.2292036,
'learning_rate': 0.07960275}
I0723 20:50:04.361625 140626572066816 model_lib_v2.py:708] {'Loss/classification_loss': 0.051457774,
'Loss/localization_loss': 0.037896447,
'Loss/regularization_loss': 0.13984938,
'Loss/total_loss': 0.2292036,
'learning_rate': 0.07960275}
INFO:tensorflow:Step 3300 per-step time 0.217s
I0723 20:50:26.088505 140626572066816 model_lib_v2.py:705] Step 3300 per-step time 0.217s
INFO:tensorflow:{'Loss/classification_loss': 0.07204049,
'Loss/localization_loss': 0.029298523,
'Loss/regularization_loss': 0.13914883,
'Loss/total_loss': 0.24048784,
'learning_rate': 0.07956588}
I0723 20:50:26.088937 140626572066816 model_lib_v2.py:708] {'Loss/classification_loss': 0.07204049,
'Loss/localization_loss': 0.029298523,
'Loss/regularization_loss': 0.13914883,
'Loss/total_loss': 0.24048784,
'learning_rate': 0.07956588}
INFO:tensorflow:Step 3400 per-step time 0.215s
I0723 20:50:47.616233 140626572066816 model_lib_v2.py:705] Step 3400 per-step time 0.215s
INFO:tensorflow:{'Loss/classification_loss': 0.04224451,
'Loss/localization_loss': 0.026293626,
'Loss/regularization_loss': 0.13841993,
'Loss/total_loss': 0.20695806,
'learning_rate': 0.079527386}
I0723 20:50:47.616593 140626572066816 model_lib_v2.py:708] {'Loss/classification_loss': 0.04224451,
'Loss/localization_loss': 0.026293626,
'Loss/regularization_loss': 0.13841993,
'Loss/total_loss': 0.20695806,
'learning_rate': 0.079527386}
INFO:tensorflow:Step 3500 per-step time 0.214s
I0723 20:51:08.991382 140626572066816 model_lib_v2.py:705] Step 3500 per-step time 0.214s
INFO:tensorflow:{'Loss/classification_loss': 0.053126764,
'Loss/localization_loss': 0.02088539,
'Loss/regularization_loss': 0.13768691,
'Loss/total_loss': 0.21169907,
'learning_rate': 0.07948727}
I0723 20:51:08.991719 140626572066816 model_lib_v2.py:708] {'Loss/classification_loss': 0.053126764,
'Loss/localization_loss': 0.02088539,
'Loss/regularization_loss': 0.13768691,
'Loss/total_loss': 0.21169907,
'learning_rate': 0.07948727}
INFO:tensorflow:Step 3600 per-step time 0.211s
I0723 20:51:30.119030 140626572066816 model_lib_v2.py:705] Step 3600 per-step time 0.211s
INFO:tensorflow:{'Loss/classification_loss': 0.07779261,
'Loss/localization_loss': 0.03236228,
'Loss/regularization_loss': 0.13698612,
'Loss/total_loss': 0.247141,
'learning_rate': 0.079445526}
I0723 20:51:30.119377 140626572066816 model_lib_v2.py:708] {'Loss/classification_loss': 0.07779261,
'Loss/localization_loss': 0.03236228,
'Loss/regularization_loss': 0.13698612,
'Loss/total_loss': 0.247141,
'learning_rate': 0.079445526}
INFO:tensorflow:Step 3700 per-step time 0.212s
I0723 20:51:51.336414 140626572066816 model_lib_v2.py:705] Step 3700 per-step time 0.212s
INFO:tensorflow:{'Loss/classification_loss': 0.025829343,
'Loss/localization_loss': 0.015464746,
'Loss/regularization_loss': 0.13631509,
'Loss/total_loss': 0.17760918,
'learning_rate': 0.07940216}
I0723 20:51:51.336753 140626572066816 model_lib_v2.py:708] {'Loss/classification_loss': 0.025829343,
'Loss/localization_loss': 0.015464746,
'Loss/regularization_loss': 0.13631509,
'Loss/total_loss': 0.17760918,
'learning_rate': 0.07940216}
INFO:tensorflow:Step 3800 per-step time 0.213s
I0723 20:52:12.633708 140626572066816 model_lib_v2.py:705] Step 3800 per-step time 0.213s
INFO:tensorflow:{'Loss/classification_loss': 0.019798258,
'Loss/localization_loss': 0.01289745,
'Loss/regularization_loss': 0.13565895,
'Loss/total_loss': 0.16835466,
'learning_rate': 0.079357184}
I0723 20:52:12.634003 140626572066816 model_lib_v2.py:708] {'Loss/classification_loss': 0.019798258,
'Loss/localization_loss': 0.01289745,
'Loss/regularization_loss': 0.13565895,
'Loss/total_loss': 0.16835466,
'learning_rate': 0.079357184}
INFO:tensorflow:Step 3900 per-step time 0.212s
I0723 20:52:33.863171 140626572066816 model_lib_v2.py:705] Step 3900 per-step time 0.212s
INFO:tensorflow:{'Loss/classification_loss': 0.026984489,
'Loss/localization_loss': 0.01571266,
'Loss/regularization_loss': 0.13494258,
'Loss/total_loss': 0.17763972,
'learning_rate': 0.07931058}
I0723 20:52:33.863554 140626572066816 model_lib_v2.py:708] {'Loss/classification_loss': 0.026984489,
'Loss/localization_loss': 0.01571266,
'Loss/regularization_loss': 0.13494258,
'Loss/total_loss': 0.17763972,
'learning_rate': 0.07931058}
INFO:tensorflow:Step 4000 per-step time 0.215s
I0723 20:52:55.315957 140626572066816 model_lib_v2.py:705] Step 4000 per-step time 0.215s
INFO:tensorflow:{'Loss/classification_loss': 0.033078447,
'Loss/localization_loss': 0.020152707,
'Loss/regularization_loss': 0.13420762,
'Loss/total_loss': 0.18743877,
'learning_rate': 0.07926236}
I0723 20:52:55.316276 140626572066816 model_lib_v2.py:708] {'Loss/classification_loss': 0.033078447,
'Loss/localization_loss': 0.020152707,
'Loss/regularization_loss': 0.13420762,
'Loss/total_loss': 0.18743877,
'learning_rate': 0.07926236}
INFO:tensorflow:Step 4100 per-step time 0.223s
I0723 20:53:17.576137 140626572066816 model_lib_v2.py:705] Step 4100 per-step time 0.223s
INFO:tensorflow:{'Loss/classification_loss': 0.017920459,
'Loss/localization_loss': 0.02094631,
'Loss/regularization_loss': 0.1335444,
'Loss/total_loss': 0.17241117,
'learning_rate': 0.07921253}
I0723 20:53:17.576457 140626572066816 model_lib_v2.py:708] {'Loss/classification_loss': 0.017920459,
'Loss/localization_loss': 0.02094631,
'Loss/regularization_loss': 0.1335444,
'Loss/total_loss': 0.17241117,
'learning_rate': 0.07921253}
INFO:tensorflow:Step 4200 per-step time 0.213s
I0723 20:53:38.851945 140626572066816 model_lib_v2.py:705] Step 4200 per-step time 0.213s
INFO:tensorflow:{'Loss/classification_loss': 0.022387782,
'Loss/localization_loss': 0.015163237,
'Loss/regularization_loss': 0.13281494,
'Loss/total_loss': 0.17036596,
'learning_rate': 0.07916109}
I0723 20:53:38.852239 140626572066816 model_lib_v2.py:708] {'Loss/classification_loss': 0.022387782,
'Loss/localization_loss': 0.015163237,
'Loss/regularization_loss': 0.13281494,
'Loss/total_loss': 0.17036596,
'learning_rate': 0.07916109}
INFO:tensorflow:Step 4300 per-step time 0.212s
I0723 20:54:00.103778 140626572066816 model_lib_v2.py:705] Step 4300 per-step time 0.212s
INFO:tensorflow:{'Loss/classification_loss': 0.062405866,
'Loss/localization_loss': 0.025872342,
'Loss/regularization_loss': 0.13396084,
'Loss/total_loss': 0.22223905,
'learning_rate': 0.07910804}
I0723 20:54:00.104200 140626572066816 model_lib_v2.py:708] {'Loss/classification_loss': 0.062405866,
'Loss/localization_loss': 0.025872342,
'Loss/regularization_loss': 0.13396084,
'Loss/total_loss': 0.22223905,
'learning_rate': 0.07910804}
INFO:tensorflow:Step 4400 per-step time 0.213s
I0723 20:54:21.379106 140626572066816 model_lib_v2.py:705] Step 4400 per-step time 0.213s
INFO:tensorflow:{'Loss/classification_loss': 0.06676763,
'Loss/localization_loss': 0.03919744,
'Loss/regularization_loss': 0.13458088,
'Loss/total_loss': 0.24054596,
'learning_rate': 0.07905338}
I0723 20:54:21.379535 140626572066816 model_lib_v2.py:708] {'Loss/classification_loss': 0.06676763,
'Loss/localization_loss': 0.03919744,
'Loss/regularization_loss': 0.13458088,
'Loss/total_loss': 0.24054596,
'learning_rate': 0.07905338}
INFO:tensorflow:Step 4500 per-step time 0.214s
I0723 20:54:42.731768 140626572066816 model_lib_v2.py:705] Step 4500 per-step time 0.214s
INFO:tensorflow:{'Loss/classification_loss': 0.052128218,
'Loss/localization_loss': 0.0403441,
'Loss/regularization_loss': 0.13428211,
'Loss/total_loss': 0.22675443,
'learning_rate': 0.07899711}
I0723 20:54:42.732155 140626572066816 model_lib_v2.py:708] {'Loss/classification_loss': 0.052128218,
'Loss/localization_loss': 0.0403441,
'Loss/regularization_loss': 0.13428211,
'Loss/total_loss': 0.22675443,
'learning_rate': 0.07899711}
INFO:tensorflow:Step 4600 per-step time 0.216s
I0723 20:55:04.340383 140626572066816 model_lib_v2.py:705] Step 4600 per-step time 0.216s
INFO:tensorflow:{'Loss/classification_loss': 0.046954993,
'Loss/localization_loss': 0.016242914,
'Loss/regularization_loss': 0.13367942,
'Loss/total_loss': 0.19687733,
'learning_rate': 0.078939244}
I0723 20:55:04.340785 140626572066816 model_lib_v2.py:708] {'Loss/classification_loss': 0.046954993,
'Loss/localization_loss': 0.016242914,
'Loss/regularization_loss': 0.13367942,
'Loss/total_loss': 0.19687733,
'learning_rate': 0.078939244}
INFO:tensorflow:Step 4700 per-step time 0.215s
I0723 20:55:25.876799 140626572066816 model_lib_v2.py:705] Step 4700 per-step time 0.215s
INFO:tensorflow:{'Loss/classification_loss': 0.027930265,
'Loss/localization_loss': 0.010687997,
'Loss/regularization_loss': 0.13298608,
'Loss/total_loss': 0.17160435,
'learning_rate': 0.07887978}
I0723 20:55:25.877180 140626572066816 model_lib_v2.py:708] {'Loss/classification_loss': 0.027930265,
'Loss/localization_loss': 0.010687997,
'Loss/regularization_loss': 0.13298608,
'Loss/total_loss': 0.17160435,
'learning_rate': 0.07887978}
INFO:tensorflow:Step 4800 per-step time 0.215s
I0723 20:55:47.369318 140626572066816 model_lib_v2.py:705] Step 4800 per-step time 0.215s
INFO:tensorflow:{'Loss/classification_loss': 0.03882729,
'Loss/localization_loss': 0.02785907,
'Loss/regularization_loss': 0.13228287,
'Loss/total_loss': 0.19896923,
'learning_rate': 0.07881871}
I0723 20:55:47.369722 140626572066816 model_lib_v2.py:708] {'Loss/classification_loss': 0.03882729,
'Loss/localization_loss': 0.02785907,
'Loss/regularization_loss': 0.13228287,
'Loss/total_loss': 0.19896923,
'learning_rate': 0.07881871}
INFO:tensorflow:Step 4900 per-step time 0.215s
I0723 20:56:08.829562 140626572066816 model_lib_v2.py:705] Step 4900 per-step time 0.215s
INFO:tensorflow:{'Loss/classification_loss': 0.024931258,
'Loss/localization_loss': 0.01004631,
'Loss/regularization_loss': 0.13159756,
'Loss/total_loss': 0.16657513,
'learning_rate': 0.07875605}
I0723 20:56:08.829966 140626572066816 model_lib_v2.py:708] {'Loss/classification_loss': 0.024931258,
'Loss/localization_loss': 0.01004631,
'Loss/regularization_loss': 0.13159756,
'Loss/total_loss': 0.16657513,
'learning_rate': 0.07875605}
INFO:tensorflow:Step 5000 per-step time 0.213s
I0723 20:56:30.151069 140626572066816 model_lib_v2.py:705] Step 5000 per-step time 0.213s
INFO:tensorflow:{'Loss/classification_loss': 0.034125406,
'Loss/localization_loss': 0.010182411,
'Loss/regularization_loss': 0.13107517,
'Loss/total_loss': 0.17538299,
'learning_rate': 0.078691795}
I0723 20:56:30.151575 140626572066816 model_lib_v2.py:708] {'Loss/classification_loss': 0.034125406,
'Loss/localization_loss': 0.010182411,
'Loss/regularization_loss': 0.13107517,
'Loss/total_loss': 0.17538299,
'learning_rate': 0.078691795}
# Pin Pillow to 9.5 for compatibility with the TF Object Detection API's
# visualization utilities (newer Pillow releases removed APIs they rely on)
!pip install pillow==9.5
Collecting pillow==9.5
Downloading Pillow-9.5.0-cp310-cp310-manylinux_2_28_x86_64.whl (3.4 MB)
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 3.4/3.4 MB 12.3 MB/s eta 0:00:00
Installing collected packages: pillow
Attempting uninstall: pillow
Found existing installation: Pillow 8.4.0
Uninstalling Pillow-8.4.0:
Successfully uninstalled Pillow-8.4.0
Successfully installed pillow-9.5.0
Mean Average Precision (mAP) formula:
$$
mAP = \frac{1}{N}\sum_{i=1}^{N} AP_i
$$
where $AP_i$ is the Average Precision for class $i$ and $N$ is the number of classes.
# Evaluate the tuned model on validation set
# Passing --checkpoint_dir switches model_main_tf2.py from training mode to
# continuous-evaluation mode: it watches MODEL_DIR for checkpoints, restores
# the latest one (ckpt-6 in the log below), and reports COCO detection
# metrics (mAP / AR at several IoU thresholds and object sizes).
# NOTE(review): assumes TRAINING_SCRIPT, MODEL_DIR and PIPELINE_CONFIG were
# defined in an earlier cell — confirm before running this cell standalone.
# The run blocks waiting for new checkpoints after the eval pass finishes,
# so it must be stopped manually (hence the KeyboardInterrupt below).
command = f"python {TRAINING_SCRIPT} --model_dir={MODEL_DIR} --pipeline_config_path={PIPELINE_CONFIG} --checkpoint_dir={MODEL_DIR}"
!{command}
2023-07-23 21:11:01.669388: W tensorflow/compiler/tf2tensorrt/utils/py_utils.cc:38] TF-TRT Warning: Could not find TensorRT
/usr/local/lib/python3.10/dist-packages/tensorflow_io/python/ops/__init__.py:98: UserWarning: unable to load libtensorflow_io_plugins.so: unable to open file: libtensorflow_io_plugins.so, from paths: ['/usr/local/lib/python3.10/dist-packages/tensorflow_io/python/ops/libtensorflow_io_plugins.so']
caused by: ['/usr/local/lib/python3.10/dist-packages/tensorflow_io/python/ops/libtensorflow_io_plugins.so: undefined symbol: _ZN3tsl6Status12empty_stringB5cxx11Ev']
warnings.warn(f"unable to load libtensorflow_io_plugins.so: {e}")
/usr/local/lib/python3.10/dist-packages/tensorflow_io/python/ops/__init__.py:104: UserWarning: file system plugins are not loaded: unable to open file: libtensorflow_io.so, from paths: ['/usr/local/lib/python3.10/dist-packages/tensorflow_io/python/ops/libtensorflow_io.so']
caused by: ['/usr/local/lib/python3.10/dist-packages/tensorflow_io/python/ops/libtensorflow_io.so: undefined symbol: _ZNK10tensorflow4data11DatasetBase8FinalizeEPNS_15OpKernelContextESt8functionIFN3tsl8StatusOrISt10unique_ptrIS1_NS5_4core15RefCountDeleterEEEEvEE']
warnings.warn(f"file system plugins are not loaded: {e}")
WARNING:tensorflow:Forced number of epochs for all eval validations to be 1.
W0723 21:11:06.576776 135541519040512 model_lib_v2.py:1089] Forced number of epochs for all eval validations to be 1.
INFO:tensorflow:Maybe overwriting sample_1_of_n_eval_examples: None
I0723 21:11:06.577058 135541519040512 config_util.py:552] Maybe overwriting sample_1_of_n_eval_examples: None
INFO:tensorflow:Maybe overwriting use_bfloat16: False
I0723 21:11:06.577173 135541519040512 config_util.py:552] Maybe overwriting use_bfloat16: False
INFO:tensorflow:Maybe overwriting eval_num_epochs: 1
I0723 21:11:06.577288 135541519040512 config_util.py:552] Maybe overwriting eval_num_epochs: 1
WARNING:tensorflow:Expected number of evaluation epochs is 1, but instead encountered `eval_on_train_input_config.num_epochs` = 0. Overwriting `num_epochs` to 1.
W0723 21:11:06.577460 135541519040512 model_lib_v2.py:1106] Expected number of evaluation epochs is 1, but instead encountered `eval_on_train_input_config.num_epochs` = 0. Overwriting `num_epochs` to 1.
2023-07-23 21:11:08.844099: W tensorflow/core/common_runtime/gpu/gpu_bfc_allocator.cc:47] Overriding orig_value setting because the TF_FORCE_GPU_ALLOW_GROWTH environment variable is set. Original config value was 0.
INFO:tensorflow:Reading unweighted datasets: ['/content/Tensorflow/training/annotations/valid.record']
I0723 21:11:09.617485 135541519040512 dataset_builder.py:162] Reading unweighted datasets: ['/content/Tensorflow/training/annotations/valid.record']
INFO:tensorflow:Reading record datasets for input file: ['/content/Tensorflow/training/annotations/valid.record']
I0723 21:11:09.617808 135541519040512 dataset_builder.py:79] Reading record datasets for input file: ['/content/Tensorflow/training/annotations/valid.record']
INFO:tensorflow:Number of filenames to read: 1
I0723 21:11:09.617930 135541519040512 dataset_builder.py:80] Number of filenames to read: 1
WARNING:tensorflow:num_readers has been reduced to 1 to match input file shards.
W0723 21:11:09.618019 135541519040512 dataset_builder.py:86] num_readers has been reduced to 1 to match input file shards.
WARNING:tensorflow:From /usr/local/lib/python3.10/dist-packages/object_detection/builders/dataset_builder.py:100: parallel_interleave (from tensorflow.python.data.experimental.ops.interleave_ops) is deprecated and will be removed in a future version.
Instructions for updating:
Use `tf.data.Dataset.interleave(map_func, cycle_length, block_length, num_parallel_calls=tf.data.AUTOTUNE)` instead. If sloppy execution is desired, use `tf.data.Options.deterministic`.
W0723 21:11:09.632118 135541519040512 deprecation.py:364] From /usr/local/lib/python3.10/dist-packages/object_detection/builders/dataset_builder.py:100: parallel_interleave (from tensorflow.python.data.experimental.ops.interleave_ops) is deprecated and will be removed in a future version.
Instructions for updating:
Use `tf.data.Dataset.interleave(map_func, cycle_length, block_length, num_parallel_calls=tf.data.AUTOTUNE)` instead. If sloppy execution is desired, use `tf.data.Options.deterministic`.
WARNING:tensorflow:From /usr/local/lib/python3.10/dist-packages/object_detection/builders/dataset_builder.py:235: DatasetV1.map_with_legacy_function (from tensorflow.python.data.ops.dataset_ops) is deprecated and will be removed in a future version.
Instructions for updating:
Use `tf.data.Dataset.map()
W0723 21:11:09.735780 135541519040512 deprecation.py:364] From /usr/local/lib/python3.10/dist-packages/object_detection/builders/dataset_builder.py:235: DatasetV1.map_with_legacy_function (from tensorflow.python.data.ops.dataset_ops) is deprecated and will be removed in a future version.
Instructions for updating:
Use `tf.data.Dataset.map()
WARNING:tensorflow:From /usr/local/lib/python3.10/dist-packages/tensorflow/python/util/dispatch.py:1176: sparse_to_dense (from tensorflow.python.ops.sparse_ops) is deprecated and will be removed in a future version.
Instructions for updating:
Create a `tf.sparse.SparseTensor` and use `tf.sparse.to_dense` instead.
W0723 21:11:22.173449 135541519040512 deprecation.py:364] From /usr/local/lib/python3.10/dist-packages/tensorflow/python/util/dispatch.py:1176: sparse_to_dense (from tensorflow.python.ops.sparse_ops) is deprecated and will be removed in a future version.
Instructions for updating:
Create a `tf.sparse.SparseTensor` and use `tf.sparse.to_dense` instead.
WARNING:tensorflow:From /usr/local/lib/python3.10/dist-packages/tensorflow/python/util/dispatch.py:1176: to_float (from tensorflow.python.ops.math_ops) is deprecated and will be removed in a future version.
Instructions for updating:
Use `tf.cast` instead.
W0723 21:11:23.267807 135541519040512 deprecation.py:364] From /usr/local/lib/python3.10/dist-packages/tensorflow/python/util/dispatch.py:1176: to_float (from tensorflow.python.ops.math_ops) is deprecated and will be removed in a future version.
Instructions for updating:
Use `tf.cast` instead.
INFO:tensorflow:Waiting for new checkpoint at /content/Tensorflow/training/models/ssd_mobilenet_v2_fpnlite_640x640_coco17_tpu-8
I0723 21:11:25.744795 135541519040512 checkpoint_utils.py:168] Waiting for new checkpoint at /content/Tensorflow/training/models/ssd_mobilenet_v2_fpnlite_640x640_coco17_tpu-8
INFO:tensorflow:Found new checkpoint at /content/Tensorflow/training/models/ssd_mobilenet_v2_fpnlite_640x640_coco17_tpu-8/ckpt-6
I0723 21:11:25.745691 135541519040512 checkpoint_utils.py:177] Found new checkpoint at /content/Tensorflow/training/models/ssd_mobilenet_v2_fpnlite_640x640_coco17_tpu-8/ckpt-6
/usr/local/lib/python3.10/dist-packages/keras/src/backend.py:452: UserWarning: `tf.keras.backend.set_learning_phase` is deprecated and will be removed after 2020-10-11. To update it, simply pass a True/False value to the `training` argument of the `__call__` method of your layer or model.
warnings.warn(
I0723 21:11:31.595580 135541519040512 api.py:460] feature_map_spatial_dims: [(80, 80), (40, 40), (20, 20), (10, 10), (5, 5)]
I0723 21:11:47.698973 135541519040512 api.py:460] feature_map_spatial_dims: [(80, 80), (40, 40), (20, 20), (10, 10), (5, 5)]
WARNING:tensorflow:From /usr/local/lib/python3.10/dist-packages/tensorflow/python/util/dispatch.py:1176: to_int64 (from tensorflow.python.ops.math_ops) is deprecated and will be removed in a future version.
Instructions for updating:
Use `tf.cast` instead.
W0723 21:11:54.674696 135541519040512 deprecation.py:364] From /usr/local/lib/python3.10/dist-packages/tensorflow/python/util/dispatch.py:1176: to_int64 (from tensorflow.python.ops.math_ops) is deprecated and will be removed in a future version.
Instructions for updating:
Use `tf.cast` instead.
INFO:tensorflow:Finished eval step 0
I0723 21:11:54.718118 135541519040512 model_lib_v2.py:966] Finished eval step 0
WARNING:tensorflow:From /usr/local/lib/python3.10/dist-packages/tensorflow/python/autograph/impl/api.py:460: py_func (from tensorflow.python.ops.script_ops) is deprecated and will be removed in a future version.
Instructions for updating:
tf.py_func is deprecated in TF V2. Instead, there are two
options available in V2.
- tf.py_function takes a python function which manipulates tf eager
tensors instead of numpy arrays. It's easy to convert a tf eager tensor to
an ndarray (just call tensor.numpy()) but having access to eager tensors
means `tf.py_function`s can use accelerators such as GPUs as well as
being differentiable using a gradient tape.
- tf.numpy_function maintains the semantics of the deprecated tf.py_func
(it is not differentiable, and manipulates numpy arrays). It drops the
stateful argument making all functions stateful.
W0723 21:11:54.962739 135541519040512 deprecation.py:364] From /usr/local/lib/python3.10/dist-packages/tensorflow/python/autograph/impl/api.py:460: py_func (from tensorflow.python.ops.script_ops) is deprecated and will be removed in a future version.
Instructions for updating:
tf.py_func is deprecated in TF V2. Instead, there are two
options available in V2.
- tf.py_function takes a python function which manipulates tf eager
tensors instead of numpy arrays. It's easy to convert a tf eager tensor to
an ndarray (just call tensor.numpy()) but having access to eager tensors
means `tf.py_function`s can use accelerators such as GPUs as well as
being differentiable using a gradient tape.
- tf.numpy_function maintains the semantics of the deprecated tf.py_func
(it is not differentiable, and manipulates numpy arrays). It drops the
stateful argument making all functions stateful.
INFO:tensorflow:Finished eval step 100
I0723 21:12:05.808678 135541519040512 model_lib_v2.py:966] Finished eval step 100
INFO:tensorflow:Performing evaluation on 176 images.
I0723 21:12:10.451332 135541519040512 coco_evaluation.py:293] Performing evaluation on 176 images.
creating index...
index created!
INFO:tensorflow:Loading and preparing annotation results...
I0723 21:12:10.453507 135541519040512 coco_tools.py:116] Loading and preparing annotation results...
INFO:tensorflow:DONE (t=0.01s)
I0723 21:12:10.465435 135541519040512 coco_tools.py:138] DONE (t=0.01s)
creating index...
index created!
Running per image evaluation...
Evaluate annotation type *bbox*
DONE (t=0.41s).
Accumulating evaluation results...
DONE (t=0.23s).
Average Precision (AP) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.638
Average Precision (AP) @[ IoU=0.50 | area= all | maxDets=100 ] = 0.824
Average Precision (AP) @[ IoU=0.75 | area= all | maxDets=100 ] = 0.770
Average Precision (AP) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = 0.455
Average Precision (AP) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = 0.738
Average Precision (AP) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 0.803
Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets= 1 ] = 0.639
Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets= 10 ] = 0.715
Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.725
Average Recall (AR) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = 0.540
Average Recall (AR) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = 0.801
Average Recall (AR) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 0.823
INFO:tensorflow:Eval metrics at step 5000
I0723 21:12:11.147585 135541519040512 model_lib_v2.py:1015] Eval metrics at step 5000
INFO:tensorflow: + DetectionBoxes_Precision/mAP: 0.637941
I0723 21:12:11.160488 135541519040512 model_lib_v2.py:1018] + DetectionBoxes_Precision/mAP: 0.637941
INFO:tensorflow: + DetectionBoxes_Precision/mAP@.50IOU: 0.824165
I0723 21:12:11.162297 135541519040512 model_lib_v2.py:1018] + DetectionBoxes_Precision/mAP@.50IOU: 0.824165
INFO:tensorflow: + DetectionBoxes_Precision/mAP@.75IOU: 0.770128
I0723 21:12:11.163916 135541519040512 model_lib_v2.py:1018] + DetectionBoxes_Precision/mAP@.75IOU: 0.770128
INFO:tensorflow: + DetectionBoxes_Precision/mAP (small): 0.455455
I0723 21:12:11.165367 135541519040512 model_lib_v2.py:1018] + DetectionBoxes_Precision/mAP (small): 0.455455
INFO:tensorflow: + DetectionBoxes_Precision/mAP (medium): 0.737855
I0723 21:12:11.166779 135541519040512 model_lib_v2.py:1018] + DetectionBoxes_Precision/mAP (medium): 0.737855
INFO:tensorflow: + DetectionBoxes_Precision/mAP (large): 0.803029
I0723 21:12:11.168224 135541519040512 model_lib_v2.py:1018] + DetectionBoxes_Precision/mAP (large): 0.803029
INFO:tensorflow: + DetectionBoxes_Recall/AR@1: 0.638616
I0723 21:12:11.169818 135541519040512 model_lib_v2.py:1018] + DetectionBoxes_Recall/AR@1: 0.638616
INFO:tensorflow: + DetectionBoxes_Recall/AR@10: 0.714501
I0723 21:12:11.171224 135541519040512 model_lib_v2.py:1018] + DetectionBoxes_Recall/AR@10: 0.714501
INFO:tensorflow: + DetectionBoxes_Recall/AR@100: 0.724527
I0723 21:12:11.172803 135541519040512 model_lib_v2.py:1018] + DetectionBoxes_Recall/AR@100: 0.724527
INFO:tensorflow: + DetectionBoxes_Recall/AR@100 (small): 0.539640
I0723 21:12:11.174288 135541519040512 model_lib_v2.py:1018] + DetectionBoxes_Recall/AR@100 (small): 0.539640
INFO:tensorflow: + DetectionBoxes_Recall/AR@100 (medium): 0.800680
I0723 21:12:11.175742 135541519040512 model_lib_v2.py:1018] + DetectionBoxes_Recall/AR@100 (medium): 0.800680
INFO:tensorflow: + DetectionBoxes_Recall/AR@100 (large): 0.823164
I0723 21:12:11.177278 135541519040512 model_lib_v2.py:1018] + DetectionBoxes_Recall/AR@100 (large): 0.823164
INFO:tensorflow: + Loss/localization_loss: 0.049456
I0723 21:12:11.178404 135541519040512 model_lib_v2.py:1018] + Loss/localization_loss: 0.049456
INFO:tensorflow: + Loss/classification_loss: 0.140257
I0723 21:12:11.179610 135541519040512 model_lib_v2.py:1018] + Loss/classification_loss: 0.140257
INFO:tensorflow: + Loss/regularization_loss: 0.131068
I0723 21:12:11.180717 135541519040512 model_lib_v2.py:1018] + Loss/regularization_loss: 0.131068
INFO:tensorflow: + Loss/total_loss: 0.320781
I0723 21:12:11.181789 135541519040512 model_lib_v2.py:1018] + Loss/total_loss: 0.320781
Traceback (most recent call last):
File "/content/Tensorflow/models/research/object_detection/model_main_tf2.py", line 114, in <module>
tf.compat.v1.app.run()
File "/usr/local/lib/python3.10/dist-packages/tensorflow/python/platform/app.py", line 36, in run
_run(main=main, argv=argv, flags_parser=_parse_flags_tolerate_undef)
File "/usr/local/lib/python3.10/dist-packages/absl/app.py", line 308, in run
_run_main(main, args)
File "/usr/local/lib/python3.10/dist-packages/absl/app.py", line 254, in _run_main
File "/content/Tensorflow/models/research/object_detection/model_main_tf2.py", line 81, in main
model_lib_v2.eval_continuously(
File "/usr/local/lib/python3.10/dist-packages/object_detection/model_lib_v2.py", line 1135, in eval_continuously
for latest_checkpoint in tf.train.checkpoints_iterator(
File "/usr/local/lib/python3.10/dist-packages/tensorflow/python/training/checkpoint_utils.py", line 244, in checkpoints_iterator
time.sleep(time_to_next_eval)
KeyboardInterrupt
^C
# Copy exporter script from Tensorflow/models/research/ to Tensorflow/training/
# so it can be invoked with paths relative to the training directory.
# NOTE(review): relies on `shutil` being imported and the CWD being the
# Tensorflow/ root from an earlier cell — confirm before running standalone.
shutil.copy("models/research/object_detection/exporter_main_v2.py", "training")
'training/exporter_main_v2.py'
# Run the exporter script to save the model
# exporter_main_v2.py reads the latest checkpoint from MODEL_DIR and writes a
# TF SavedModel (plus the pipeline config) under exported-models/{MODEL_NAME}.
# --input_type image_tensor makes the serving signature accept uint8 image
# tensors directly.
%cd training
command = f"python exporter_main_v2.py --input_type image_tensor --pipeline_config_path {PIPELINE_CONFIG} --trained_checkpoint_dir {MODEL_DIR} --output_directory exported-models/{MODEL_NAME}"
!{command}
/content/Tensorflow/training
2023-07-23 21:16:27.420815: W tensorflow/compiler/tf2tensorrt/utils/py_utils.cc:38] TF-TRT Warning: Could not find TensorRT
/usr/local/lib/python3.10/dist-packages/tensorflow_io/python/ops/__init__.py:98: UserWarning: unable to load libtensorflow_io_plugins.so: unable to open file: libtensorflow_io_plugins.so, from paths: ['/usr/local/lib/python3.10/dist-packages/tensorflow_io/python/ops/libtensorflow_io_plugins.so']
caused by: ['/usr/local/lib/python3.10/dist-packages/tensorflow_io/python/ops/libtensorflow_io_plugins.so: undefined symbol: _ZN3tsl6Status12empty_stringB5cxx11Ev']
warnings.warn(f"unable to load libtensorflow_io_plugins.so: {e}")
/usr/local/lib/python3.10/dist-packages/tensorflow_io/python/ops/__init__.py:104: UserWarning: file system plugins are not loaded: unable to open file: libtensorflow_io.so, from paths: ['/usr/local/lib/python3.10/dist-packages/tensorflow_io/python/ops/libtensorflow_io.so']
caused by: ['/usr/local/lib/python3.10/dist-packages/tensorflow_io/python/ops/libtensorflow_io.so: undefined symbol: _ZNK10tensorflow4data11DatasetBase8FinalizeEPNS_15OpKernelContextESt8functionIFN3tsl8StatusOrISt10unique_ptrIS1_NS5_4core15RefCountDeleterEEEEvEE']
warnings.warn(f"file system plugins are not loaded: {e}")
2023-07-23 21:16:33.359720: W tensorflow/core/common_runtime/gpu/gpu_bfc_allocator.cc:47] Overriding orig_value setting because the TF_FORCE_GPU_ALLOW_GROWTH environment variable is set. Original config value was 0.
WARNING:tensorflow:From /usr/local/lib/python3.10/dist-packages/tensorflow/python/autograph/impl/api.py:459: calling map_fn_v2 (from tensorflow.python.ops.map_fn) with back_prop=False is deprecated and will be removed in a future version.
Instructions for updating:
back_prop=False is deprecated. Consider using tf.stop_gradient instead.
Instead of:
results = tf.map_fn(fn, elems, back_prop=False)
Use:
results = tf.nest.map_structure(tf.stop_gradient, tf.map_fn(fn, elems))
W0723 21:16:33.678606 138137870114816 deprecation.py:641] From /usr/local/lib/python3.10/dist-packages/tensorflow/python/autograph/impl/api.py:459: calling map_fn_v2 (from tensorflow.python.ops.map_fn) with back_prop=False is deprecated and will be removed in a future version.
Instructions for updating:
back_prop=False is deprecated. Consider using tf.stop_gradient instead.
Instead of:
results = tf.map_fn(fn, elems, back_prop=False)
Use:
results = tf.nest.map_structure(tf.stop_gradient, tf.map_fn(fn, elems))
I0723 21:16:37.665414 138137870114816 api.py:460] feature_map_spatial_dims: [(80, 80), (40, 40), (20, 20), (10, 10), (5, 5)]
I0723 21:16:51.366565 138137870114816 api.py:460] feature_map_spatial_dims: [(80, 80), (40, 40), (20, 20), (10, 10), (5, 5)]
I0723 21:16:54.348509 138137870114816 signature_serialization.py:148] Function `call_func` contains input name(s) resource with unsupported characters which will be renamed to weightsharedconvolutionalboxpredictor_predictiontower_conv2d_3_batchnorm_feature_4_fusedbatchnormv3_readvariableop_1_resource in the SavedModel.
I0723 21:16:55.554375 138137870114816 api.py:460] feature_map_spatial_dims: [(80, 80), (40, 40), (20, 20), (10, 10), (5, 5)]
WARNING:tensorflow:Skipping full serialization of Keras layer <object_detection.meta_architectures.ssd_meta_arch.SSDMetaArch object at 0x7da24c4d3280>, because it is not built.
W0723 21:16:58.108711 138137870114816 save_impl.py:66] Skipping full serialization of Keras layer <object_detection.meta_architectures.ssd_meta_arch.SSDMetaArch object at 0x7da24c4d3280>, because it is not built.
WARNING:tensorflow:Skipping full serialization of Keras layer <keras.src.layers.convolutional.separable_conv2d.SeparableConv2D object at 0x7da22d8e91e0>, because it is not built.
W0723 21:16:58.385001 138137870114816 save_impl.py:66] Skipping full serialization of Keras layer <keras.src.layers.convolutional.separable_conv2d.SeparableConv2D object at 0x7da22d8e91e0>, because it is not built.
WARNING:tensorflow:Skipping full serialization of Keras layer <object_detection.core.freezable_batch_norm.FreezableBatchNorm object at 0x7da22ffd2d70>, because it is not built.
W0723 21:16:58.385216 138137870114816 save_impl.py:66] Skipping full serialization of Keras layer <object_detection.core.freezable_batch_norm.FreezableBatchNorm object at 0x7da22ffd2d70>, because it is not built.
WARNING:tensorflow:Skipping full serialization of Keras layer <keras.src.layers.core.lambda_layer.Lambda object at 0x7da22d865ab0>, because it is not built.
W0723 21:16:58.385327 138137870114816 save_impl.py:66] Skipping full serialization of Keras layer <keras.src.layers.core.lambda_layer.Lambda object at 0x7da22d865ab0>, because it is not built.
WARNING:tensorflow:Skipping full serialization of Keras layer <keras.src.layers.convolutional.separable_conv2d.SeparableConv2D object at 0x7da22d8667d0>, because it is not built.
W0723 21:16:58.385426 138137870114816 save_impl.py:66] Skipping full serialization of Keras layer <keras.src.layers.convolutional.separable_conv2d.SeparableConv2D object at 0x7da22d8667d0>, because it is not built.
WARNING:tensorflow:Skipping full serialization of Keras layer <object_detection.core.freezable_batch_norm.FreezableBatchNorm object at 0x7da22d8670d0>, because it is not built.
W0723 21:16:58.385533 138137870114816 save_impl.py:66] Skipping full serialization of Keras layer <object_detection.core.freezable_batch_norm.FreezableBatchNorm object at 0x7da22d8670d0>, because it is not built.
WARNING:tensorflow:Skipping full serialization of Keras layer <keras.src.layers.core.lambda_layer.Lambda object at 0x7da22ff2e830>, because it is not built.
W0723 21:16:58.385632 138137870114816 save_impl.py:66] Skipping full serialization of Keras layer <keras.src.layers.core.lambda_layer.Lambda object at 0x7da22ff2e830>, because it is not built.
WARNING:tensorflow:Skipping full serialization of Keras layer <keras.src.layers.convolutional.separable_conv2d.SeparableConv2D object at 0x7da22ff2fbb0>, because it is not built.
W0723 21:16:58.385730 138137870114816 save_impl.py:66] Skipping full serialization of Keras layer <keras.src.layers.convolutional.separable_conv2d.SeparableConv2D object at 0x7da22ff2fbb0>, because it is not built.
WARNING:tensorflow:Skipping full serialization of Keras layer <object_detection.core.freezable_batch_norm.FreezableBatchNorm object at 0x7da22ff2c3a0>, because it is not built.
W0723 21:16:58.385809 138137870114816 save_impl.py:66] Skipping full serialization of Keras layer <object_detection.core.freezable_batch_norm.FreezableBatchNorm object at 0x7da22ff2c3a0>, because it is not built.
WARNING:tensorflow:Skipping full serialization of Keras layer <keras.src.layers.core.lambda_layer.Lambda object at 0x7da22ff2f130>, because it is not built.
W0723 21:16:58.385886 138137870114816 save_impl.py:66] Skipping full serialization of Keras layer <keras.src.layers.core.lambda_layer.Lambda object at 0x7da22ff2f130>, because it is not built.
WARNING:tensorflow:Skipping full serialization of Keras layer <keras.src.layers.convolutional.separable_conv2d.SeparableConv2D object at 0x7da22ff2c1f0>, because it is not built.
W0723 21:16:58.385961 138137870114816 save_impl.py:66] Skipping full serialization of Keras layer <keras.src.layers.convolutional.separable_conv2d.SeparableConv2D object at 0x7da22ff2c1f0>, because it is not built.
WARNING:tensorflow:Skipping full serialization of Keras layer <object_detection.core.freezable_batch_norm.FreezableBatchNorm object at 0x7da22ff2d660>, because it is not built.
W0723 21:16:58.386037 138137870114816 save_impl.py:66] Skipping full serialization of Keras layer <object_detection.core.freezable_batch_norm.FreezableBatchNorm object at 0x7da22ff2d660>, because it is not built.
WARNING:tensorflow:Skipping full serialization of Keras layer <keras.src.layers.core.lambda_layer.Lambda object at 0x7da22ff2c5e0>, because it is not built.
W0723 21:16:58.386114 138137870114816 save_impl.py:66] Skipping full serialization of Keras layer <keras.src.layers.core.lambda_layer.Lambda object at 0x7da22ff2c5e0>, because it is not built.
WARNING:tensorflow:Skipping full serialization of Keras layer <object_detection.core.freezable_batch_norm.FreezableBatchNorm object at 0x7da22d816d10>, because it is not built.
W0723 21:16:58.386198 138137870114816 save_impl.py:66] Skipping full serialization of Keras layer <object_detection.core.freezable_batch_norm.FreezableBatchNorm object at 0x7da22d816d10>, because it is not built.
WARNING:tensorflow:Skipping full serialization of Keras layer <keras.src.layers.core.lambda_layer.Lambda object at 0x7da22d8165c0>, because it is not built.
W0723 21:16:58.386277 138137870114816 save_impl.py:66] Skipping full serialization of Keras layer <keras.src.layers.core.lambda_layer.Lambda object at 0x7da22d8165c0>, because it is not built.
WARNING:tensorflow:Skipping full serialization of Keras layer <object_detection.core.freezable_batch_norm.FreezableBatchNorm object at 0x7da22ffb04f0>, because it is not built.
W0723 21:16:58.386389 138137870114816 save_impl.py:66] Skipping full serialization of Keras layer <object_detection.core.freezable_batch_norm.FreezableBatchNorm object at 0x7da22ffb04f0>, because it is not built.
WARNING:tensorflow:Skipping full serialization of Keras layer <keras.src.layers.core.lambda_layer.Lambda object at 0x7da22ffb1450>, because it is not built.
W0723 21:16:58.386469 138137870114816 save_impl.py:66] Skipping full serialization of Keras layer <keras.src.layers.core.lambda_layer.Lambda object at 0x7da22ffb1450>, because it is not built.
WARNING:tensorflow:Skipping full serialization of Keras layer <object_detection.core.freezable_batch_norm.FreezableBatchNorm object at 0x7da22ffb1fc0>, because it is not built.
W0723 21:16:58.386546 138137870114816 save_impl.py:66] Skipping full serialization of Keras layer <object_detection.core.freezable_batch_norm.FreezableBatchNorm object at 0x7da22ffb1fc0>, because it is not built.
WARNING:tensorflow:Skipping full serialization of Keras layer <keras.src.layers.core.lambda_layer.Lambda object at 0x7da22ffb17e0>, because it is not built.
W0723 21:16:58.386630 138137870114816 save_impl.py:66] Skipping full serialization of Keras layer <keras.src.layers.core.lambda_layer.Lambda object at 0x7da22ffb17e0>, because it is not built.
WARNING:tensorflow:Skipping full serialization of Keras layer <object_detection.core.freezable_batch_norm.FreezableBatchNorm object at 0x7da22ffb0d90>, because it is not built.
W0723 21:16:58.386707 138137870114816 save_impl.py:66] Skipping full serialization of Keras layer <object_detection.core.freezable_batch_norm.FreezableBatchNorm object at 0x7da22ffb0d90>, because it is not built.
WARNING:tensorflow:Skipping full serialization of Keras layer <keras.src.layers.core.lambda_layer.Lambda object at 0x7da22ffb0220>, because it is not built.
W0723 21:16:58.386779 138137870114816 save_impl.py:66] Skipping full serialization of Keras layer <keras.src.layers.core.lambda_layer.Lambda object at 0x7da22ffb0220>, because it is not built.
WARNING:tensorflow:Skipping full serialization of Keras layer <object_detection.core.freezable_batch_norm.FreezableBatchNorm object at 0x7da22d8a99c0>, because it is not built.
W0723 21:16:58.386854 138137870114816 save_impl.py:66] Skipping full serialization of Keras layer <object_detection.core.freezable_batch_norm.FreezableBatchNorm object at 0x7da22d8a99c0>, because it is not built.
WARNING:tensorflow:Skipping full serialization of Keras layer <keras.src.layers.core.lambda_layer.Lambda object at 0x7da22d8b9ab0>, because it is not built.
W0723 21:16:58.386928 138137870114816 save_impl.py:66] Skipping full serialization of Keras layer <keras.src.layers.core.lambda_layer.Lambda object at 0x7da22d8b9ab0>, because it is not built.
WARNING:tensorflow:Skipping full serialization of Keras layer <object_detection.core.freezable_batch_norm.FreezableBatchNorm object at 0x7da22d8b9480>, because it is not built.
W0723 21:16:58.387000 138137870114816 save_impl.py:66] Skipping full serialization of Keras layer <object_detection.core.freezable_batch_norm.FreezableBatchNorm object at 0x7da22d8b9480>, because it is not built.
WARNING:tensorflow:Skipping full serialization of Keras layer <keras.src.layers.core.lambda_layer.Lambda object at 0x7da22d8ba500>, because it is not built.
W0723 21:16:58.387071 138137870114816 save_impl.py:66] Skipping full serialization of Keras layer <keras.src.layers.core.lambda_layer.Lambda object at 0x7da22d8ba500>, because it is not built.
WARNING:tensorflow:Skipping full serialization of Keras layer <object_detection.core.freezable_batch_norm.FreezableBatchNorm object at 0x7da22d8b9720>, because it is not built.
W0723 21:16:58.387142 138137870114816 save_impl.py:66] Skipping full serialization of Keras layer <object_detection.core.freezable_batch_norm.FreezableBatchNorm object at 0x7da22d8b9720>, because it is not built.
WARNING:tensorflow:Skipping full serialization of Keras layer <keras.src.layers.core.lambda_layer.Lambda object at 0x7da22d8b9390>, because it is not built.
W0723 21:16:58.387214 138137870114816 save_impl.py:66] Skipping full serialization of Keras layer <keras.src.layers.core.lambda_layer.Lambda object at 0x7da22d8b9390>, because it is not built.
WARNING:tensorflow:Skipping full serialization of Keras layer <object_detection.core.freezable_batch_norm.FreezableBatchNorm object at 0x7da22d8ba260>, because it is not built.
W0723 21:16:58.387292 138137870114816 save_impl.py:66] Skipping full serialization of Keras layer <object_detection.core.freezable_batch_norm.FreezableBatchNorm object at 0x7da22d8ba260>, because it is not built.
WARNING:tensorflow:Skipping full serialization of Keras layer <keras.src.layers.core.lambda_layer.Lambda object at 0x7da2546e63b0>, because it is not built.
W0723 21:16:58.387377 138137870114816 save_impl.py:66] Skipping full serialization of Keras layer <keras.src.layers.core.lambda_layer.Lambda object at 0x7da2546e63b0>, because it is not built.
WARNING:tensorflow:Skipping full serialization of Keras layer <object_detection.core.freezable_batch_norm.FreezableBatchNorm object at 0x7da22d84d1e0>, because it is not built.
W0723 21:16:58.387462 138137870114816 save_impl.py:66] Skipping full serialization of Keras layer <object_detection.core.freezable_batch_norm.FreezableBatchNorm object at 0x7da22d84d1e0>, because it is not built.
WARNING:tensorflow:Skipping full serialization of Keras layer <keras.src.layers.core.lambda_layer.Lambda object at 0x7da227f2f310>, because it is not built.
W0723 21:16:58.387537 138137870114816 save_impl.py:66] Skipping full serialization of Keras layer <keras.src.layers.core.lambda_layer.Lambda object at 0x7da227f2f310>, because it is not built.
WARNING:tensorflow:Skipping full serialization of Keras layer <object_detection.core.freezable_batch_norm.FreezableBatchNorm object at 0x7da227f2f2e0>, because it is not built.
W0723 21:16:58.387609 138137870114816 save_impl.py:66] Skipping full serialization of Keras layer <object_detection.core.freezable_batch_norm.FreezableBatchNorm object at 0x7da227f2f2e0>, because it is not built.
WARNING:tensorflow:Skipping full serialization of Keras layer <keras.src.layers.core.lambda_layer.Lambda object at 0x7da227f2ca00>, because it is not built.
W0723 21:16:58.387681 138137870114816 save_impl.py:66] Skipping full serialization of Keras layer <keras.src.layers.core.lambda_layer.Lambda object at 0x7da227f2ca00>, because it is not built.
WARNING:tensorflow:Skipping full serialization of Keras layer <object_detection.core.freezable_batch_norm.FreezableBatchNorm object at 0x7da227f2e0e0>, because it is not built.
W0723 21:16:58.387758 138137870114816 save_impl.py:66] Skipping full serialization of Keras layer <object_detection.core.freezable_batch_norm.FreezableBatchNorm object at 0x7da227f2e0e0>, because it is not built.
WARNING:tensorflow:Skipping full serialization of Keras layer <keras.src.layers.core.lambda_layer.Lambda object at 0x7da227f2d720>, because it is not built.
W0723 21:16:58.387829 138137870114816 save_impl.py:66] Skipping full serialization of Keras layer <keras.src.layers.core.lambda_layer.Lambda object at 0x7da227f2d720>, because it is not built.
WARNING:tensorflow:Skipping full serialization of Keras layer <object_detection.core.freezable_batch_norm.FreezableBatchNorm object at 0x7da227f2f3d0>, because it is not built.
W0723 21:16:58.387899 138137870114816 save_impl.py:66] Skipping full serialization of Keras layer <object_detection.core.freezable_batch_norm.FreezableBatchNorm object at 0x7da227f2f3d0>, because it is not built.
WARNING:tensorflow:Skipping full serialization of Keras layer <keras.src.layers.core.lambda_layer.Lambda object at 0x7da227f2d540>, because it is not built.
W0723 21:16:58.387973 138137870114816 save_impl.py:66] Skipping full serialization of Keras layer <keras.src.layers.core.lambda_layer.Lambda object at 0x7da227f2d540>, because it is not built.
WARNING:tensorflow:Skipping full serialization of Keras layer <object_detection.core.freezable_batch_norm.FreezableBatchNorm object at 0x7da227f2c040>, because it is not built.
W0723 21:16:58.388044 138137870114816 save_impl.py:66] Skipping full serialization of Keras layer <object_detection.core.freezable_batch_norm.FreezableBatchNorm object at 0x7da227f2c040>, because it is not built.
WARNING:tensorflow:Skipping full serialization of Keras layer <keras.src.layers.core.lambda_layer.Lambda object at 0x7da227f8df60>, because it is not built.
W0723 21:16:58.388116 138137870114816 save_impl.py:66] Skipping full serialization of Keras layer <keras.src.layers.core.lambda_layer.Lambda object at 0x7da227f8df60>, because it is not built.
WARNING:tensorflow:Skipping full serialization of Keras layer <object_detection.core.freezable_batch_norm.FreezableBatchNorm object at 0x7da227f8cfd0>, because it is not built.
W0723 21:16:58.388187 138137870114816 save_impl.py:66] Skipping full serialization of Keras layer <object_detection.core.freezable_batch_norm.FreezableBatchNorm object at 0x7da227f8cfd0>, because it is not built.
WARNING:tensorflow:Skipping full serialization of Keras layer <keras.src.layers.core.lambda_layer.Lambda object at 0x7da227f8cee0>, because it is not built.
W0723 21:16:58.388262 138137870114816 save_impl.py:66] Skipping full serialization of Keras layer <keras.src.layers.core.lambda_layer.Lambda object at 0x7da227f8cee0>, because it is not built.
WARNING:tensorflow:Skipping full serialization of Keras layer <object_detection.core.freezable_batch_norm.FreezableBatchNorm object at 0x7da227f8dbd0>, because it is not built.
W0723 21:16:58.388334 138137870114816 save_impl.py:66] Skipping full serialization of Keras layer <object_detection.core.freezable_batch_norm.FreezableBatchNorm object at 0x7da227f8dbd0>, because it is not built.
WARNING:tensorflow:Skipping full serialization of Keras layer <keras.src.layers.core.lambda_layer.Lambda object at 0x7da227f8e4a0>, because it is not built.
W0723 21:16:58.388416 138137870114816 save_impl.py:66] Skipping full serialization of Keras layer <keras.src.layers.core.lambda_layer.Lambda object at 0x7da227f8e4a0>, because it is not built.
WARNING:tensorflow:Skipping full serialization of Keras layer <object_detection.core.freezable_batch_norm.FreezableBatchNorm object at 0x7da227f8eda0>, because it is not built.
W0723 21:16:58.388488 138137870114816 save_impl.py:66] Skipping full serialization of Keras layer <object_detection.core.freezable_batch_norm.FreezableBatchNorm object at 0x7da227f8eda0>, because it is not built.
WARNING:tensorflow:Skipping full serialization of Keras layer <keras.src.layers.core.lambda_layer.Lambda object at 0x7da227f8e530>, because it is not built.
W0723 21:16:58.466122 138137870114816 save_impl.py:66] Skipping full serialization of Keras layer <keras.src.layers.core.lambda_layer.Lambda object at 0x7da227f8e530>, because it is not built.
I0723 21:17:16.015402 138137870114816 save.py:274] Found untraced functions such as WeightSharedConvolutionalBoxPredictor_layer_call_fn, WeightSharedConvolutionalBoxPredictor_layer_call_and_return_conditional_losses, WeightSharedConvolutionalBoxHead_layer_call_fn, WeightSharedConvolutionalBoxHead_layer_call_and_return_conditional_losses, WeightSharedConvolutionalClassHead_layer_call_fn while saving (showing 5 of 173). These functions will not be directly callable after loading.
INFO:tensorflow:Assets written to: exported-models/ssd_mobilenet_v2_fpnlite_640x640_coco17_tpu-8/saved_model/assets
I0723 21:17:23.175528 138137870114816 builder_impl.py:804] Assets written to: exported-models/ssd_mobilenet_v2_fpnlite_640x640_coco17_tpu-8/saved_model/assets
I0723 21:17:23.616219 138137870114816 fingerprinting_utils.py:48] Writing fingerprint to exported-models/ssd_mobilenet_v2_fpnlite_640x640_coco17_tpu-8/saved_model/fingerprint.pb
INFO:tensorflow:Writing pipeline config file to exported-models/ssd_mobilenet_v2_fpnlite_640x640_coco17_tpu-8/pipeline.config
I0723 21:17:24.400198 138137870114816 config_util.py:253] Writing pipeline config file to exported-models/ssd_mobilenet_v2_fpnlite_640x640_coco17_tpu-8/pipeline.config
# Zip the folder containing the finetuned model
# (IPython magics: %cd changes the notebook working directory; !{command} runs a shell command)
%cd exported-models
# Bundle the whole exported model directory (saved_model/, checkpoint/, pipeline.config)
# into <MODEL_NAME>.zip so it can be uploaded to Google Drive for later inference runs.
command = f"zip -r {MODEL_NAME}.zip {MODEL_NAME}"
!{command}
%cd ..
/content/Tensorflow/training/exported-models adding: ssd_mobilenet_v2_fpnlite_640x640_coco17_tpu-8/ (stored 0%) adding: ssd_mobilenet_v2_fpnlite_640x640_coco17_tpu-8/saved_model/ (stored 0%) adding: ssd_mobilenet_v2_fpnlite_640x640_coco17_tpu-8/saved_model/saved_model.pb (deflated 92%) adding: ssd_mobilenet_v2_fpnlite_640x640_coco17_tpu-8/saved_model/fingerprint.pb (stored 0%) adding: ssd_mobilenet_v2_fpnlite_640x640_coco17_tpu-8/saved_model/assets/ (stored 0%) adding: ssd_mobilenet_v2_fpnlite_640x640_coco17_tpu-8/saved_model/variables/ (stored 0%) adding: ssd_mobilenet_v2_fpnlite_640x640_coco17_tpu-8/saved_model/variables/variables.index (deflated 78%) adding: ssd_mobilenet_v2_fpnlite_640x640_coco17_tpu-8/saved_model/variables/variables.data-00000-of-00001 (deflated 9%) adding: ssd_mobilenet_v2_fpnlite_640x640_coco17_tpu-8/checkpoint/ (stored 0%) adding: ssd_mobilenet_v2_fpnlite_640x640_coco17_tpu-8/checkpoint/ckpt-0.index (deflated 80%) adding: ssd_mobilenet_v2_fpnlite_640x640_coco17_tpu-8/checkpoint/checkpoint (deflated 40%) adding: ssd_mobilenet_v2_fpnlite_640x640_coco17_tpu-8/checkpoint/ckpt-0.data-00000-of-00001 (deflated 8%) adding: ssd_mobilenet_v2_fpnlite_640x640_coco17_tpu-8/pipeline.config (deflated 69%) /content/Tensorflow/training
Note: restart the runtime before running the inference cells below to avoid errors.
After restarting the runtime, the current working directory resets to /content/.
import time
import os
import cv2
import tensorflow as tf
import numpy as np
import gdown
from zipfile import ZipFile
from google.colab.patches import cv2_imshow
from object_detection.utils import label_map_util
from object_detection.utils import visualization_utils as viz_utils
Note: set MODEL_NAME below to the name of the finetuned model you want to use for inference.
# Name of the finetuned model to run inference with (must match the zip uploaded to Drive)
MODEL_NAME = "ssd_mobilenet_v2_fpnlite_640x640_coco17_tpu-8"
# Where the downloaded SavedModel will live after extraction (see download cell below)
TEST_MODEL_PATH = f"Tensorflow/testing/{MODEL_NAME}/saved_model"
# Training-side artifacts; defined for completeness, not read by the inference cells below
CONFIG_PATH = f"Tensorflow/training/models/{MODEL_NAME}/pipeline.config"
CKPT_PATH = f"Tensorflow/training/models/{MODEL_NAME}"
# Create a new directory for evaluation: Tensorflow/testing
# os.makedirs(..., exist_ok=True) is idempotent and also creates the parent
# "Tensorflow" directory if it is missing (e.g. after a runtime restart),
# whereas the original exists-check + os.mkdir failed in that case.
os.makedirs("Tensorflow/testing", exist_ok=True)
# Download finetuned model. Follow these steps:
# The finetuned model is stored as a zip file in Google Drive with a share link available.
# For example, if the share link is: https://drive.google.com/file/d/1FfSKlAGV_Z-GfvVx03Wkpxuh8r876HXq/view?usp=drive_link
# the id of the file is: 1FfSKlAGV_Z-GfvVx03Wkpxuh8r876HXq (between /d/ and /view).
# Copy and append the id to https://drive.google.com/uc?export=download&id=
# The final downloadable address is: https://drive.google.com/uc?export=download&id=1FfSKlAGV_Z-GfvVx03Wkpxuh8r876HXq
gdown.download("https://drive.google.com/uc?export=download&id=1Gbq8glTfi-1p6NPDfV1Z5TH0av8BJzBL")
# Extract the archive into Tensorflow/testing/ (yields Tensorflow/testing/<MODEL_NAME>/...),
# then remove the zip to free disk space.
# NOTE(review): the context-manager variable shadows the stdlib `zipfile` module name;
# harmless here because the module itself is never referenced, only the imported ZipFile class.
with ZipFile(f"{MODEL_NAME}.zip") as zipfile:
    zipfile.extractall(f"Tensorflow/testing/")
command = f"rm {MODEL_NAME}.zip"
!{command}
Downloading... From: https://drive.google.com/uc?export=download&id=1Gbq8glTfi-1p6NPDfV1Z5TH0av8BJzBL To: /content/ssd_mobilenet_v2_fpnlite_640x640_coco17_tpu-8.zip 100%|██████████| 19.9M/19.9M [00:00<00:00, 58.9MB/s]
# Load finetuned model
# Loads the exported SavedModel downloaded above; the returned object is called
# directly on a batched image tensor in plot_detections below.
test_model = tf.saved_model.load(TEST_MODEL_PATH)
# Create new directory Tensorflow/testing/test_images to store test images.
# A single os.makedirs(..., exist_ok=True) call creates the whole path,
# including the Tensorflow/testing parent, and is idempotent — replacing the
# original's two separate exists-check + os.mkdir steps.
os.makedirs("Tensorflow/testing/test_images", exist_ok=True)
# Each sample image downloaded below is written to this fixed path before inference
TEST_IMAGE_PATH = "Tensorflow/testing/test_images/test_image.png"
# Load label map file
# Builds a dict mapping integer class id -> category info for the visualization
# utility; use_display_name=True labels boxes with the display_name field from the pbtxt.
category_index=label_map_util.create_category_index_from_labelmap("Tensorflow/training/annotations/label_map.pbtxt", use_display_name=True)
# Function to plot image with detections
def plot_detections(image_path=TEST_IMAGE_PATH):
    """Run the finetuned detector on one image and display it with boxes drawn.

    Args:
        image_path: Path of the image file to read (defaults to TEST_IMAGE_PATH).

    Side effects:
        Prints the model's forward-pass runtime and shows the annotated image
        via cv2_imshow.
    """
    # BUG FIX: the original read TEST_IMAGE_PATH unconditionally, silently
    # ignoring the image_path argument; honor the argument so callers can
    # run detection on any file.
    image_np = cv2.imread(image_path)
    input_tensor = tf.convert_to_tensor(image_np)
    # The model expects a batch of images, so add an axis with `tf.newaxis`.
    input_tensor = input_tensor[tf.newaxis, ...]
    # Time only the forward pass
    start_time = time.time()
    detections = test_model(input_tensor)
    elapsed_time = time.time() - start_time
    print("Model runtime:", round(elapsed_time, 3), "seconds")
    # All outputs are batched tensors.
    # Convert to numpy arrays, and take index [0] to remove the batch dimension.
    # We're only interested in the first num_detections.
    num_detections = int(detections.pop('num_detections'))
    detections = {key: value[0, :num_detections].numpy()
                  for key, value in detections.items()}
    detections['num_detections'] = num_detections
    # detection_classes should be ints (category_index keys are integer ids)
    detections['detection_classes'] = detections['detection_classes'].astype(np.int64)
    # Draw on a uint8 copy so the original pixels stay untouched
    image_np_with_detections_uint8 = image_np.copy().astype(np.uint8)
    viz_utils.visualize_boxes_and_labels_on_image_array(
        image_np_with_detections_uint8,
        detections['detection_boxes'],
        detections['detection_classes'],
        detections['detection_scores'],
        category_index,
        use_normalized_coordinates=True,
        max_boxes_to_draw=200,
        min_score_thresh=.30,
        agnostic_mode=False)
    cv2_imshow(image_np_with_detections_uint8)
# These are links to some sample images for testing:
# Download sample image 1 to the shared TEST_IMAGE_PATH (overwriting any
# previous test image), then run detection and display the result.
gdown.download("https://drive.google.com/uc?export=download&id=1GG8l6W9U13G7Pq7NodSty6jcCmNALVmf", TEST_IMAGE_PATH)
plot_detections(TEST_IMAGE_PATH)
Downloading... From: https://drive.google.com/uc?export=download&id=1GG8l6W9U13G7Pq7NodSty6jcCmNALVmf To: /content/Tensorflow/testing/test_images/test_image.png 100%|██████████| 62.6k/62.6k [00:00<00:00, 38.5MB/s]
Model runtime: 0.035 seconds
# Sample image 2: download to TEST_IMAGE_PATH (overwrites image 1) and run detection
gdown.download("https://drive.google.com/uc?export=download&id=1Fd_qrsfrUtuJFpbBnd9C60hBPdbvVgzT", TEST_IMAGE_PATH)
plot_detections(TEST_IMAGE_PATH)
Downloading... From: https://drive.google.com/uc?export=download&id=1Fd_qrsfrUtuJFpbBnd9C60hBPdbvVgzT To: /content/Tensorflow/testing/test_images/test_image.png 100%|██████████| 184k/184k [00:00<00:00, 75.1MB/s]
Model runtime: 0.04 seconds
# Sample image 3: download to TEST_IMAGE_PATH (overwrites image 2) and run detection
gdown.download("https://drive.google.com/uc?export=download&id=1GI74d5XWs3zcui6Eyv-HBZ91qlZT4qVf", TEST_IMAGE_PATH)
plot_detections(TEST_IMAGE_PATH)
Downloading... From: https://drive.google.com/uc?export=download&id=1GI74d5XWs3zcui6Eyv-HBZ91qlZT4qVf To: /content/Tensorflow/testing/test_images/test_image.png 100%|██████████| 240k/240k [00:00<00:00, 99.0MB/s]
Model runtime: 0.034 seconds
# Sample image 4: download to TEST_IMAGE_PATH (overwrites image 3) and run detection
gdown.download("https://drive.google.com/uc?export=download&id=1GNAdKNAGOGnQfwdSNWlpOnhqnZJOQB9q", TEST_IMAGE_PATH)
plot_detections(TEST_IMAGE_PATH)
Downloading... From: https://drive.google.com/uc?export=download&id=1GNAdKNAGOGnQfwdSNWlpOnhqnZJOQB9q To: /content/Tensorflow/testing/test_images/test_image.png 100%|██████████| 14.1k/14.1k [00:00<00:00, 11.1MB/s]
Model runtime: 0.036 seconds